From f608da8959172d90ef94323fb75c8e25f927e561 Mon Sep 17 00:00:00 2001 From: Lek Date: Wed, 20 Sep 2023 17:29:19 +0800 Subject: [PATCH 01/58] fix: fix official pg cluster (#5190) --- .../templates/cluster.yaml | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/deploy/official-postgresql-cluster/templates/cluster.yaml b/deploy/official-postgresql-cluster/templates/cluster.yaml index d49cc4926fa..e4b45617e62 100644 --- a/deploy/official-postgresql-cluster/templates/cluster.yaml +++ b/deploy/official-postgresql-cluster/templates/cluster.yaml @@ -1,15 +1,13 @@ {{- include "kblib.clusterCommon" . }} -clusterDefinitionRef: official-postgresql -componentSpecs: - - name: postgresql - componentDefRef: postgresql + clusterDefinitionRef: official-postgresql + componentSpecs: + - name: postgresql + componentDefRef: postgresql {{- include "kblib.componentMonitor" . | indent 6 }} {{- include "official-postgresql-cluster.replicaCount" . | indent 6 }} - enabledLogs: - - running - serviceAccountName: {{ include "kblib.serviceAccountName" . }} - switchPolicy: - type: Noop + serviceAccountName: {{ include "kblib.serviceAccountName" . }} + switchPolicy: + type: Noop {{- include "kblib.componentResources" . | indent 6 }} {{- include "kblib.componentStorages" . | indent 6 }} {{- include "kblib.componentServices" . | indent 6 }} \ No newline at end of file From fc5558e3142c132cdd475cd059cc655234424860 Mon Sep 17 00:00:00 2001 From: free6om Date: Wed, 20 Sep 2023 17:32:48 +0800 Subject: [PATCH 02/58] chore: builder pattern (#5197) build all objects by 'builder pattern' as it debuggable, testable, reusable and more readable. 
--- .../apps/components/base_stateful_hscale.go | 27 +- .../apps/components/consensus_workload.go | 7 +- .../apps/components/replication_workload.go | 7 +- .../apps/components/workload_builder.go | 20 +- .../apps/configuration/configuration_test.go | 8 +- .../apps/transformer_cluster_credential.go | 5 +- controllers/apps/transformer_rbac.go | 63 +-- controllers/apps/transformer_rbac_test.go | 15 +- .../dataprotection/backup_controller.go | 11 +- .../cmd/builder/template/component_wrapper.go | 5 +- internal/constant/const.go | 3 + internal/controller/builder/builder_backup.go | 49 ++ .../controller/builder/builder_backup_test.go | 50 ++ .../builder/builder_cluster_role_binding.go | 44 ++ .../builder_cluster_role_binding_test.go | 60 ++ .../controller/builder/builder_container.go | 102 ++++ .../builder/builder_container_test.go | 144 +++++ internal/controller/builder/builder_job.go | 10 + .../controller/builder/builder_job_test.go | 8 + internal/controller/builder/builder_pdb.go | 71 +++ .../controller/builder/builder_pdb_test.go | 60 ++ internal/controller/builder/builder_pod.go | 20 + .../controller/builder/builder_pod_test.go | 54 +- internal/controller/builder/builder_pvc.go | 15 + .../controller/builder/builder_pvc_test.go | 18 + .../builder/builder_role_binding.go | 44 ++ .../builder/builder_role_binding_test.go | 60 ++ .../builder/builder_service_account.go | 34 ++ .../builder/builder_service_account_test.go | 39 ++ .../builder/builder_volume_snapshot_class.go | 44 ++ .../builder_volume_snapshot_class_test.go | 48 ++ .../component/cue/probe_template.cue | 53 -- internal/controller/component/probe_utils.go | 60 +- .../controller/component/probe_utils_test.go | 4 +- .../controller/configuration/operator_test.go | 3 +- .../controller/configuration/pipeline_test.go | 3 +- .../configuration/template_wrapper.go | 2 +- internal/controller/factory/builder.go | 531 ++++++++++-------- internal/controller/factory/builder_test.go | 58 +- 
.../factory/cue/backup_job_template.cue | 53 -- .../factory/cue/backup_manifests_template.cue | 50 -- .../factory/cue/config_manager_sidecar.cue | 107 ---- .../factory/cue/config_template.cue | 64 --- .../factory/cue/conn_credential_template.cue | 49 -- .../factory/cue/env_config_template.cue | 50 -- .../factory/cue/headless_service_template.cue | 88 --- .../controller/factory/cue/pdb_template.cue | 60 -- .../controller/factory/cue/pvc_template.cue | 73 --- .../controller/factory/cue/rbac_template.cue | 87 --- .../factory/cue/restore_job_template.cue | 49 -- .../factory/cue/service_template.cue | 81 --- .../factory/cue/statefulset_template.cue | 85 --- .../factory/cue/volumesnapshotclass.cue | 29 - internal/controller/plan/prepare_test.go | 15 +- internal/controller/plan/restore.go | 7 +- 55 files changed, 1362 insertions(+), 1444 deletions(-) create mode 100644 internal/controller/builder/builder_backup.go create mode 100644 internal/controller/builder/builder_backup_test.go create mode 100644 internal/controller/builder/builder_cluster_role_binding.go create mode 100644 internal/controller/builder/builder_cluster_role_binding_test.go create mode 100644 internal/controller/builder/builder_container.go create mode 100644 internal/controller/builder/builder_container_test.go create mode 100644 internal/controller/builder/builder_pdb.go create mode 100644 internal/controller/builder/builder_pdb_test.go create mode 100644 internal/controller/builder/builder_role_binding.go create mode 100644 internal/controller/builder/builder_role_binding_test.go create mode 100644 internal/controller/builder/builder_service_account.go create mode 100644 internal/controller/builder/builder_service_account_test.go create mode 100644 internal/controller/builder/builder_volume_snapshot_class.go create mode 100644 internal/controller/builder/builder_volume_snapshot_class_test.go delete mode 100644 internal/controller/component/cue/probe_template.cue delete mode 100644 
internal/controller/factory/cue/backup_job_template.cue delete mode 100644 internal/controller/factory/cue/backup_manifests_template.cue delete mode 100644 internal/controller/factory/cue/config_manager_sidecar.cue delete mode 100644 internal/controller/factory/cue/config_template.cue delete mode 100644 internal/controller/factory/cue/conn_credential_template.cue delete mode 100644 internal/controller/factory/cue/env_config_template.cue delete mode 100644 internal/controller/factory/cue/headless_service_template.cue delete mode 100644 internal/controller/factory/cue/pdb_template.cue delete mode 100644 internal/controller/factory/cue/pvc_template.cue delete mode 100644 internal/controller/factory/cue/rbac_template.cue delete mode 100644 internal/controller/factory/cue/restore_job_template.cue delete mode 100644 internal/controller/factory/cue/service_template.cue delete mode 100644 internal/controller/factory/cue/statefulset_template.cue delete mode 100644 internal/controller/factory/cue/volumesnapshotclass.cue diff --git a/controllers/apps/components/base_stateful_hscale.go b/controllers/apps/components/base_stateful_hscale.go index 0dca68793fd..cecb572ec86 100644 --- a/controllers/apps/components/base_stateful_hscale.go +++ b/controllers/apps/components/base_stateful_hscale.go @@ -275,10 +275,7 @@ func (d *baseDataClone) createPVCs(vcts []*corev1.PersistentVolumeClaimTemplate) } else if exist { continue } - pvc, err := factory.BuildPVC(d.cluster, d.component, vct, pvcKey, "") - if err != nil { - return nil, err - } + pvc := factory.BuildPVC(d.cluster, d.component, vct, pvcKey, "") objs = append(objs, pvc) } } @@ -373,10 +370,7 @@ func (d *snapshotDataClone) backup() ([]client.Object, error) { if backupPolicy == nil { return nil, intctrlutil.NewNotFound("not found any backup policy created by %s", backupPolicyTplName) } - backup, err := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "snapshot") - if err != nil { - return nil, err - } + backup 
:= factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "snapshot") objs = append(objs, backup) d.reqCtx.Recorder.Eventf(d.cluster, corev1.EventTypeNormal, "BackupJobCreate", "Create backupJob/%s", d.key.Name) return objs, nil @@ -482,10 +476,7 @@ func (d *snapshotDataClone) createPVCFromSnapshot( vct *corev1.PersistentVolumeClaimTemplate, pvcKey types.NamespacedName, snapshotName string) (client.Object, error) { - pvc, err := factory.BuildPVC(d.cluster, d.component, vct, pvcKey, snapshotName) - if err != nil { - return nil, err - } + pvc := factory.BuildPVC(d.cluster, d.component, vct, pvcKey, snapshotName) return pvc, nil } @@ -598,10 +589,7 @@ func (d *backupDataClone) backup() ([]client.Object, error) { if backupPolicy == nil { return nil, intctrlutil.NewNotFound("not found any backup policy created by %s", backupPolicyTplName) } - backup, err := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "datafile") - if err != nil { - return nil, err - } + backup := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "datafile") objs = append(objs, backup) return objs, nil } @@ -630,13 +618,10 @@ func (d *backupDataClone) restore(pvcKey types.NamespacedName) ([]client.Object, if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { return nil, err } - pvc, err := factory.BuildPVC(d.cluster, d.component, d.backupVCT(), pvcKey, "") - if err != nil { - return nil, err - } + pvc := factory.BuildPVC(d.cluster, d.component, d.backupVCT(), pvcKey, "") objs = append(objs, pvc) backupTool := &dataprotectionv1alpha1.BackupTool{} - if err = d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backup.Status.BackupToolName}, backupTool); err != nil { + if err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backup.Status.BackupToolName}, backupTool); err != nil { return nil, err } restoreMgr := plan.NewRestoreManager(d.reqCtx.Ctx, d.cli, d.cluster, nil) diff --git a/controllers/apps/components/consensus_workload.go 
b/controllers/apps/components/consensus_workload.go index 95e9a383c11..ba1e0e579e5 100644 --- a/controllers/apps/components/consensus_workload.go +++ b/controllers/apps/components/consensus_workload.go @@ -38,10 +38,7 @@ func (b *consensusComponentWorkloadBuilder) BuildWorkload() componentWorkloadBui func (b *consensusComponentWorkloadBuilder) BuildService() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - svcList, err := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - if err != nil { - return nil, err - } + svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) objs := make([]client.Object, 0, len(svcList)) leader := b.Comp.GetConsensusSpec().Leader for _, svc := range svcList { @@ -50,7 +47,7 @@ func (b *consensusComponentWorkloadBuilder) BuildService() componentWorkloadBuil } objs = append(objs, svc) } - return objs, err + return objs, nil } return b.BuildWrapper(buildfn) } diff --git a/controllers/apps/components/replication_workload.go b/controllers/apps/components/replication_workload.go index d65f78d6b20..073ab8e78d9 100644 --- a/controllers/apps/components/replication_workload.go +++ b/controllers/apps/components/replication_workload.go @@ -38,16 +38,13 @@ func (b *replicationComponentWorkloadBuilder) BuildWorkload() componentWorkloadB func (b *replicationComponentWorkloadBuilder) BuildService() componentWorkloadBuilder { buildFn := func() ([]client.Object, error) { - svcList, err := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - if err != nil { - return nil, err - } + svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) objs := make([]client.Object, 0, len(svcList)) for _, svc := range svcList { svc.Spec.Selector[constant.RoleLabelKey] = constant.Primary objs = append(objs, svc) } - return objs, err + return objs, nil } return b.BuildWrapper(buildFn) } diff --git 
a/controllers/apps/components/workload_builder.go b/controllers/apps/components/workload_builder.go index 2f319e80a16..5e680085d7e 100644 --- a/controllers/apps/components/workload_builder.go +++ b/controllers/apps/components/workload_builder.go @@ -66,10 +66,10 @@ type componentWorkloadBuilderBase struct { func (b *componentWorkloadBuilderBase) BuildEnv() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - envCfg, err := factory.BuildEnvConfig(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) + envCfg := factory.BuildEnvConfig(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) b.EnvConfig = envCfg b.LocalObjs = append(b.LocalObjs, envCfg) - return []client.Object{envCfg}, err + return []client.Object{envCfg}, nil } return b.BuildWrapper(buildfn) } @@ -124,10 +124,7 @@ func (b *componentWorkloadBuilderBase) BuildPDB() componentWorkloadBuilder { // conditionally build PodDisruptionBudget synthesizedComponent := b.Comp.GetSynthesizedComponent() if synthesizedComponent.MinAvailable != nil { - pdb, err := factory.BuildPDB(b.Comp.GetCluster(), synthesizedComponent) - if err != nil { - return nil, err - } + pdb := factory.BuildPDB(b.Comp.GetCluster(), synthesizedComponent) return []client.Object{pdb}, nil } else { panic("this shouldn't happen") @@ -169,23 +166,20 @@ func (b *componentWorkloadBuilderBase) BuildVolumeMount() componentWorkloadBuild func (b *componentWorkloadBuilderBase) BuildService() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - svcList, err := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - if err != nil { - return nil, err - } + svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) objs := make([]client.Object, 0) for _, svc := range svcList { objs = append(objs, svc) } - return objs, err + return objs, nil } return b.BuildWrapper(buildfn) } func (b *componentWorkloadBuilderBase) BuildHeadlessService() componentWorkloadBuilder { 
buildfn := func() ([]client.Object, error) { - svc, err := factory.BuildHeadlessSvc(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - return []client.Object{svc}, err + svc := factory.BuildHeadlessSvc(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) + return []client.Object{svc}, nil } return b.BuildWrapper(buildfn) } diff --git a/controllers/apps/configuration/configuration_test.go b/controllers/apps/configuration/configuration_test.go index e85b507d0e1..bd711a76826 100644 --- a/controllers/apps/configuration/configuration_test.go +++ b/controllers/apps/configuration/configuration_test.go @@ -113,13 +113,11 @@ func mockReconcileResource() (*corev1.ConfigMap, *appsv1alpha1.ConfigConstraint, clusterDefObj.Name, clusterVersionObj.Name). AddComponent(statefulCompName, statefulCompDefName).Create(&testCtx).GetObject() - container := corev1.Container{ - Name: "mock-container", - VolumeMounts: []corev1.VolumeMount{{ + container := *builder.NewContainerBuilder("mock-container"). + AddVolumeMounts(corev1.VolumeMount{ Name: configVolumeName, MountPath: "/mnt/config", - }}, - } + }).GetObject() _ = testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, statefulSetName, clusterObj.Name, statefulCompName). AddConfigmapVolume(configVolumeName, configmap.Name). AddContainer(container). 
diff --git a/controllers/apps/transformer_cluster_credential.go b/controllers/apps/transformer_cluster_credential.go index 6380d93a3f4..3194a01bc6c 100644 --- a/controllers/apps/transformer_cluster_credential.go +++ b/controllers/apps/transformer_cluster_credential.go @@ -73,10 +73,7 @@ func (c *ClusterCredentialTransformer) Transform(ctx graph.TransformContext, dag } } if synthesizedComponent != nil { - secret, err := factory.BuildConnCredential(transCtx.ClusterDef, cluster, synthesizedComponent) - if err != nil { - return err - } + secret := factory.BuildConnCredential(transCtx.ClusterDef, cluster, synthesizedComponent) if secret != nil { ictrltypes.LifecycleObjectCreate(dag, secret, root) } diff --git a/controllers/apps/transformer_rbac.go b/controllers/apps/transformer_rbac.go index 6ba7e352e61..079c1c0290b 100644 --- a/controllers/apps/transformer_rbac.go +++ b/controllers/apps/transformer_rbac.go @@ -46,12 +46,6 @@ type RBACTransformer struct{} var _ graph.Transformer = &RBACTransformer{} -const ( - RBACRoleName = "kubeblocks-cluster-pod-role" - RBACClusterRoleName = "kubeblocks-volume-protection-pod-role" - ServiceAccountKind = "ServiceAccount" -) - func (c *RBACTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { transCtx, _ := ctx.(*ClusterTransformContext) cluster := transCtx.Cluster @@ -87,18 +81,10 @@ func (c *RBACTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) return nil } - rb, err := buildReloBinding(cluster, serviceAccounts) - if err != nil { - return err - } - + rb := buildRoleBinding(cluster, serviceAccounts) parentVertex = ictrltypes.LifecycleObjectCreate(dag, rb, parentVertex) if len(serviceAccountsNeedCrb) > 0 { - crb, err := buildClusterReloBinding(cluster, serviceAccountsNeedCrb) - if err != nil { - return err - } - + crb := buildClusterRoleBinding(cluster, serviceAccountsNeedCrb) parentVertex = ictrltypes.LifecycleObjectCreate(dag, crb, parentVertex) } @@ -190,14 +176,14 @@ func 
isClusterRoleBindingExist(transCtx *ClusterTransformContext, serviceAccount return false } - if crb.RoleRef.Name != RBACClusterRoleName { + if crb.RoleRef.Name != constant.RBACClusterRoleName { transCtx.Logger.V(1).Info("rbac manager: ClusterRole not match", "ClusterRole", - RBACClusterRoleName, "clusterrolebinding.RoleRef", crb.RoleRef.Name) + constant.RBACClusterRoleName, "clusterrolebinding.RoleRef", crb.RoleRef.Name) } isServiceAccountMatch := false for _, sub := range crb.Subjects { - if sub.Kind == ServiceAccountKind && sub.Name == serviceAccountName { + if sub.Kind == rbacv1.ServiceAccountKind && sub.Name == serviceAccountName { isServiceAccountMatch = true break } @@ -228,14 +214,14 @@ func isRoleBindingExist(transCtx *ClusterTransformContext, serviceAccountName st return false } - if rb.RoleRef.Name != RBACClusterRoleName { + if rb.RoleRef.Name != constant.RBACClusterRoleName { transCtx.Logger.V(1).Info("rbac manager: ClusterRole not match", "ClusterRole", - RBACRoleName, "rolebinding.RoleRef", rb.RoleRef.Name) + constant.RBACRoleName, "rolebinding.RoleRef", rb.RoleRef.Name) } isServiceAccountMatch := false for _, sub := range rb.Subjects { - if sub.Kind == ServiceAccountKind && sub.Name == serviceAccountName { + if sub.Kind == rbacv1.ServiceAccountKind && sub.Name == serviceAccountName { isServiceAccountMatch = true break } @@ -321,11 +307,8 @@ func buildServiceAccounts(transCtx *ClusterTransformContext, componentSpecs []ap if _, ok := serviceAccounts[serviceAccountName]; ok { continue } - serviceAccount, err := factory.BuildServiceAccount(cluster) + serviceAccount := factory.BuildServiceAccount(cluster) serviceAccount.Name = serviceAccountName - if err != nil { - return nil, nil, err - } serviceAccounts[serviceAccountName] = serviceAccount if isVolumeProtectionEnabled(clusterDef, &compSpec) { @@ -335,46 +318,40 @@ func buildServiceAccounts(transCtx *ClusterTransformContext, componentSpecs []ap return serviceAccounts, serviceAccountsNeedCrb, nil } -func 
buildReloBinding(cluster *appsv1alpha1.Cluster, serviceAccounts map[string]*corev1.ServiceAccount) (*rbacv1.RoleBinding, error) { - roleBinding, err := factory.BuildRoleBinding(cluster) - if err != nil { - return nil, err - } +func buildRoleBinding(cluster *appsv1alpha1.Cluster, serviceAccounts map[string]*corev1.ServiceAccount) *rbacv1.RoleBinding { + roleBinding := factory.BuildRoleBinding(cluster) roleBinding.Subjects = []rbacv1.Subject{} for saName := range serviceAccounts { subject := rbacv1.Subject{ Name: saName, Namespace: cluster.Namespace, - Kind: "ServiceAccount", + Kind: rbacv1.ServiceAccountKind, } roleBinding.Subjects = append(roleBinding.Subjects, subject) } - return roleBinding, nil + return roleBinding } -func buildClusterReloBinding(cluster *appsv1alpha1.Cluster, serviceAccounts map[string]*corev1.ServiceAccount) (*rbacv1.ClusterRoleBinding, error) { - clusterRoleBinding, err := factory.BuildClusterRoleBinding(cluster) - if err != nil { - return nil, err - } +func buildClusterRoleBinding(cluster *appsv1alpha1.Cluster, serviceAccounts map[string]*corev1.ServiceAccount) *rbacv1.ClusterRoleBinding { + clusterRoleBinding := factory.BuildClusterRoleBinding(cluster) clusterRoleBinding.Subjects = []rbacv1.Subject{} for saName := range serviceAccounts { subject := rbacv1.Subject{ Name: saName, Namespace: cluster.Namespace, - Kind: "ServiceAccount", + Kind: rbacv1.ServiceAccountKind, } clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, subject) } - return clusterRoleBinding, nil + return clusterRoleBinding } func createSaVertex(serviceAccounts map[string]*corev1.ServiceAccount, dag *graph.DAG, parentVertex *ictrltypes.LifecycleVertex) []*ictrltypes.LifecycleVertex { - saVertexs := []*ictrltypes.LifecycleVertex{} + var saVertexes []*ictrltypes.LifecycleVertex for _, sa := range serviceAccounts { // serviceaccount must be created before rolebinding and clusterrolebinding saVertex := ictrltypes.LifecycleObjectCreate(dag, sa, parentVertex) - 
saVertexs = append(saVertexs, saVertex) + saVertexes = append(saVertexes, saVertex) } - return saVertexs + return saVertexes } diff --git a/controllers/apps/transformer_rbac_test.go b/controllers/apps/transformer_rbac_test.go index ee35170fb77..8cb6332cc7d 100644 --- a/controllers/apps/transformer_rbac_test.go +++ b/controllers/apps/transformer_rbac_test.go @@ -110,12 +110,10 @@ var _ = Describe("object rbac transformer test.", func() { &corev1.ServiceAccount{}, false)).Should(Succeed()) Expect(transformer.Transform(transCtx, dag)).Should(BeNil()) - serviceAccount, err := factory.BuildServiceAccount(cluster) - Expect(err).Should(BeNil()) + serviceAccount := factory.BuildServiceAccount(cluster) serviceAccount.Name = serviceAccountName - roleBinding, err := factory.BuildRoleBinding(cluster) - Expect(err).Should(BeNil()) + roleBinding := factory.BuildRoleBinding(cluster) roleBinding.Subjects[0].Name = serviceAccountName dagExpected := mockDAG(cluster) @@ -130,16 +128,13 @@ var _ = Describe("object rbac transformer test.", func() { &corev1.ServiceAccount{}, false)).Should(Succeed()) Expect(transformer.Transform(transCtx, dag)).Should(BeNil()) - serviceAccount, err := factory.BuildServiceAccount(cluster) - Expect(err).Should(BeNil()) + serviceAccount := factory.BuildServiceAccount(cluster) serviceAccount.Name = serviceAccountName - roleBinding, err := factory.BuildRoleBinding(cluster) - Expect(err).Should(BeNil()) + roleBinding := factory.BuildRoleBinding(cluster) roleBinding.Subjects[0].Name = serviceAccountName - clusterRoleBinding, err := factory.BuildClusterRoleBinding(cluster) - Expect(err).Should(BeNil()) + clusterRoleBinding := factory.BuildClusterRoleBinding(cluster) clusterRoleBinding.Subjects[0].Name = serviceAccountName dagExpected := mockDAG(cluster) diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index 43047a3ef29..b6284624f6c 100644 --- a/controllers/dataprotection/backup_controller.go +++ 
b/controllers/dataprotection/backup_controller.go @@ -1251,11 +1251,8 @@ func (r *BackupReconciler) getVolumeSnapshotClassOrCreate(ctx context.Context, s } // not found matched volume snapshot class, create one vscName := fmt.Sprintf("vsc-%s-%s", storageClassName, storageClassObj.UID[:8]) - newVSC, err := ctrlbuilder.BuildVolumeSnapshotClass(vscName, storageClassObj.Provisioner) - if err != nil { - return err - } - if err = r.snapshotCli.Create(newVSC); err != nil { + newVSC := ctrlbuilder.BuildVolumeSnapshotClass(vscName, storageClassObj.Provisioner) + if err := r.snapshotCli.Create(newVSC); err != nil { return err } *vsc = *newVSC @@ -1335,9 +1332,7 @@ func (r *BackupReconciler) createMetadataCollectionJob(reqCtx intctrlutil.Reques if err != nil { return err } - if job, err = ctrlbuilder.BuildBackupManifestsJob(key, backup, &jobPodSpec); err != nil { - return err - } + job = ctrlbuilder.BuildBackupManifestsJob(key, backup, &jobPodSpec) msg := fmt.Sprintf("creating job %s", key.Name) r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatingJob-"+key.Name, msg) return client.IgnoreAlreadyExists(r.Client.Create(reqCtx.Ctx, job)) diff --git a/internal/cli/cmd/builder/template/component_wrapper.go b/internal/cli/cmd/builder/template/component_wrapper.go index dbda2085e23..7b1df33f7cb 100644 --- a/internal/cli/cmd/builder/template/component_wrapper.go +++ b/internal/cli/cmd/builder/template/component_wrapper.go @@ -328,10 +328,7 @@ func generateComponentObjects(w *templateRenderWorkflow, ctx intctrlutil.Request if err != nil { return nil, nil, err } - secret, err := factory.BuildConnCredential(w.clusterDefObj, cluster, component.GetSynthesizedComponent()) - if err != nil { - return nil, nil, err - } + secret := factory.BuildConnCredential(w.clusterDefObj, cluster, component.GetSynthesizedComponent()) cli.AppendMockObjects(secret) if err = component.Create(ctx, cli); err != nil { return nil, nil, err diff --git a/internal/constant/const.go b/internal/constant/const.go 
index 502cd28101b..d6fdb10c92e 100644 --- a/internal/constant/const.go +++ b/internal/constant/const.go @@ -176,6 +176,9 @@ const ( // IgnoreResourceConstraint is used to specify whether to ignore the resource constraint IgnoreResourceConstraint = "resource.kubeblocks.io/ignore-constraint" + + RBACRoleName = "kubeblocks-cluster-pod-role" + RBACClusterRoleName = "kubeblocks-volume-protection-pod-role" ) const ( diff --git a/internal/controller/builder/builder_backup.go b/internal/controller/builder/builder_backup.go new file mode 100644 index 00000000000..2faecea3375 --- /dev/null +++ b/internal/controller/builder/builder_backup.go @@ -0,0 +1,49 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + dataprotection "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" +) + +type BackupBuilder struct { + BaseBuilder[dataprotection.Backup, *dataprotection.Backup, BackupBuilder] +} + +func NewBackupBuilder(namespace, name string) *BackupBuilder { + builder := &BackupBuilder{} + builder.init(namespace, name, &dataprotection.Backup{}, builder) + return builder +} + +func (builder *BackupBuilder) SetBackupPolicyName(policyName string) *BackupBuilder { + builder.get().Spec.BackupPolicyName = policyName + return builder +} + +func (builder *BackupBuilder) SetBackType(backupType dataprotection.BackupType) *BackupBuilder { + builder.get().Spec.BackupType = backupType + return builder +} + +func (builder *BackupBuilder) SetParentBackupName(parent string) *BackupBuilder { + builder.get().Spec.ParentBackupName = parent + return builder +} diff --git a/internal/controller/builder/builder_backup_test.go b/internal/controller/builder/builder_backup_test.go new file mode 100644 index 00000000000..4c8b214fd1c --- /dev/null +++ b/internal/controller/builder/builder_backup_test.go @@ -0,0 +1,50 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + dataprotection "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" +) + +var _ = Describe("backup builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + policyName := "policyName" + backupType := dataprotection.BackupTypeSnapshot + parent := "parent" + backup := NewBackupBuilder(ns, name). + SetBackupPolicyName(policyName). + SetBackType(backupType). + SetParentBackupName(parent). + GetObject() + + Expect(backup.Name).Should(Equal(name)) + Expect(backup.Namespace).Should(Equal(ns)) + Expect(backup.Spec.BackupPolicyName).Should(Equal(policyName)) + Expect(backup.Spec.BackupType).Should(Equal(backupType)) + Expect(backup.Spec.ParentBackupName).Should(Equal(parent)) + }) +}) diff --git a/internal/controller/builder/builder_cluster_role_binding.go b/internal/controller/builder/builder_cluster_role_binding.go new file mode 100644 index 00000000000..55db20863bc --- /dev/null +++ b/internal/controller/builder/builder_cluster_role_binding.go @@ -0,0 +1,44 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + rbacv1 "k8s.io/api/rbac/v1" +) + +type ClusterRoleBindingBuilder struct { + BaseBuilder[rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBinding, ClusterRoleBindingBuilder] +} + +func NewClusterRoleBindingBuilder(namespace, name string) *ClusterRoleBindingBuilder { + builder := &ClusterRoleBindingBuilder{} + builder.init(namespace, name, &rbacv1.ClusterRoleBinding{}, builder) + return builder +} + +func (builder *ClusterRoleBindingBuilder) SetRoleRef(roleRef rbacv1.RoleRef) *ClusterRoleBindingBuilder { + builder.get().RoleRef = roleRef + return builder +} + +func (builder *ClusterRoleBindingBuilder) AddSubjects(subjects ...rbacv1.Subject) *ClusterRoleBindingBuilder { + builder.get().Subjects = append(builder.get().Subjects, subjects...) + return builder +} diff --git a/internal/controller/builder/builder_cluster_role_binding_test.go b/internal/controller/builder/builder_cluster_role_binding_test.go new file mode 100644 index 00000000000..5ef10f07810 --- /dev/null +++ b/internal/controller/builder/builder_cluster_role_binding_test.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/apecloud/kubeblocks/internal/constant" +) + +var _ = Describe("cluster role binding builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + roleRef := rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: constant.RBACRoleName, + } + subject := rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: ns, + Name: fmt.Sprintf("kb-%s", name), + } + clusterRoleBinding := NewClusterRoleBindingBuilder(ns, name). + SetRoleRef(roleRef). + AddSubjects(subject). + GetObject() + + Expect(clusterRoleBinding.Name).Should(Equal(name)) + Expect(clusterRoleBinding.Namespace).Should(Equal(ns)) + Expect(clusterRoleBinding.RoleRef).Should(Equal(roleRef)) + Expect(clusterRoleBinding.Subjects).Should(HaveLen(1)) + Expect(clusterRoleBinding.Subjects[0]).Should(Equal(subject)) + }) +}) diff --git a/internal/controller/builder/builder_container.go b/internal/controller/builder/builder_container.go new file mode 100644 index 00000000000..fc483fa2d93 --- /dev/null +++ b/internal/controller/builder/builder_container.go @@ -0,0 +1,102 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + corev1 "k8s.io/api/core/v1" +) + +type ContainerBuilder struct { + object *corev1.Container +} + +func NewContainerBuilder(name string) *ContainerBuilder { + builder := &ContainerBuilder{} + builder.init(name, &corev1.Container{}) + return builder +} + +func (builder *ContainerBuilder) init(name string, obj *corev1.Container) { + obj.Name = name + builder.object = obj +} + +func (builder *ContainerBuilder) get() *corev1.Container { + return builder.object +} + +func (builder *ContainerBuilder) GetObject() *corev1.Container { + return builder.object +} + +func (builder *ContainerBuilder) AddCommands(commands ...string) *ContainerBuilder { + builder.get().Command = append(builder.get().Command, commands...) + return builder +} + +func (builder *ContainerBuilder) AddArgs(args ...string) *ContainerBuilder { + builder.get().Args = append(builder.get().Args, args...) + return builder +} + +func (builder *ContainerBuilder) AddEnv(env ...corev1.EnvVar) *ContainerBuilder { + builder.get().Env = append(builder.get().Env, env...) + return builder +} + +func (builder *ContainerBuilder) SetImage(image string) *ContainerBuilder { + builder.get().Image = image + return builder +} + +func (builder *ContainerBuilder) SetImagePullPolicy(policy corev1.PullPolicy) *ContainerBuilder { + builder.get().ImagePullPolicy = policy + return builder +} + +func (builder *ContainerBuilder) AddVolumeMounts(mounts ...corev1.VolumeMount) *ContainerBuilder { + builder.get().VolumeMounts = append(builder.get().VolumeMounts, mounts...) 
+ return builder +} + +func (builder *ContainerBuilder) SetSecurityContext(ctx corev1.SecurityContext) *ContainerBuilder { + builder.get().SecurityContext = &ctx + return builder +} + +func (builder *ContainerBuilder) SetResources(resources corev1.ResourceRequirements) *ContainerBuilder { + builder.get().Resources = resources + return builder +} + +func (builder *ContainerBuilder) AddPorts(ports ...corev1.ContainerPort) *ContainerBuilder { + builder.get().Ports = append(builder.get().Ports, ports...) + return builder +} + +func (builder *ContainerBuilder) SetReadinessProbe(probe corev1.Probe) *ContainerBuilder { + builder.get().ReadinessProbe = &probe + return builder +} + +func (builder *ContainerBuilder) SetStartupProbe(probe corev1.Probe) *ContainerBuilder { + builder.get().StartupProbe = &probe + return builder +} diff --git a/internal/controller/builder/builder_container_test.go b/internal/controller/builder/builder_container_test.go new file mode 100644 index 00000000000..f0316170148 --- /dev/null +++ b/internal/controller/builder/builder_container_test.go @@ -0,0 +1,144 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("container builder", func() { + It("should work well", func() { + const name = "foo" + commands := []string{ + name, + "--bar", + } + args := []string{ + "arg1", + "arg2", + } + env := []corev1.EnvVar{ + { + Name: name, + Value: "bar", + }, + { + Name: "hello", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + } + image := "foo:latest" + policy := corev1.PullAlways + mounts := []corev1.VolumeMount{ + { + Name: name, + MountPath: "/data/foo", + }, + { + Name: "bar", + ReadOnly: true, + MountPath: "/log/bar", + }, + } + user := int64(0) + ctx := corev1.SecurityContext{ + RunAsUser: &user, + } + + resourceQuantityValue := func(value string) resource.Quantity { + quantity, _ := resource.ParseQuantity(value) + return quantity + } + resources := corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resourceQuantityValue("0.5"), + corev1.ResourceMemory: resourceQuantityValue("500m"), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resourceQuantityValue("0.5"), + corev1.ResourceMemory: resourceQuantityValue("500m"), + }, + } + ports := []corev1.ContainerPort{ + { + Name: name, + ContainerPort: 12345, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "bar", + ContainerPort: 54321, + Protocol: corev1.ProtocolUDP, + }, + } + readinessProbe := corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{}, + }, + }, + } + startupProbe := corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(12345), + }, + }, + } + container := NewContainerBuilder(name). + AddCommands(commands...). + AddArgs(args...). + AddEnv(env...). + SetImage(image). + SetImagePullPolicy(policy). 
+ AddVolumeMounts(mounts...). + SetSecurityContext(ctx). + SetResources(resources). + AddPorts(ports...). + SetReadinessProbe(readinessProbe). + SetStartupProbe(startupProbe). + GetObject() + + Expect(container.Name).Should(Equal(name)) + Expect(container.Command).Should(Equal(commands)) + Expect(container.Args).Should(Equal(args)) + Expect(container.Env).Should(Equal(env)) + Expect(container.Image).Should(Equal(image)) + Expect(container.ImagePullPolicy).Should(Equal(policy)) + Expect(container.VolumeMounts).Should(Equal(mounts)) + Expect(container.SecurityContext).ShouldNot(BeNil()) + Expect(*container.SecurityContext).Should(Equal(ctx)) + Expect(container.Resources).Should(Equal(resources)) + Expect(container.Ports).Should(Equal(ports)) + Expect(container.ReadinessProbe).ShouldNot(BeNil()) + Expect(*container.ReadinessProbe).Should(Equal(readinessProbe)) + Expect(container.StartupProbe).ShouldNot(BeNil()) + Expect(*container.StartupProbe).Should(Equal(startupProbe)) + }) +}) diff --git a/internal/controller/builder/builder_job.go b/internal/controller/builder/builder_job.go index 8c8d93dd1e9..5992857598c 100644 --- a/internal/controller/builder/builder_job.go +++ b/internal/controller/builder/builder_job.go @@ -56,3 +56,13 @@ func (builder *JobBuilder) SetSuspend(suspend bool) *JobBuilder { builder.get().Spec.Suspend = &suspend return builder } + +func (builder *JobBuilder) SetBackoffLimit(limit int32) *JobBuilder { + builder.get().Spec.BackoffLimit = &limit + return builder +} + +func (builder *JobBuilder) SetTTLSecondsAfterFinished(ttl int32) *JobBuilder { + builder.get().Spec.TTLSecondsAfterFinished = &ttl + return builder +} diff --git a/internal/controller/builder/builder_job_test.go b/internal/controller/builder/builder_job_test.go index fcc48fd4212..5f70ce9136c 100644 --- a/internal/controller/builder/builder_job_test.go +++ b/internal/controller/builder/builder_job_test.go @@ -51,10 +51,14 @@ var _ = Describe("job builder", func() { } selectorKey, 
selectorValue := "foo", "bar" suspend := true + limit := int32(5) + ttl := int32(12) job := NewJobBuilder(ns, name). SetPodTemplateSpec(template). AddSelector(selectorKey, selectorValue). SetSuspend(suspend). + SetBackoffLimit(limit). + SetTTLSecondsAfterFinished(ttl). GetObject() Expect(job.Name).Should(Equal(name)) @@ -65,5 +69,9 @@ var _ = Describe("job builder", func() { Expect(job.Spec.Selector.MatchLabels[selectorKey]).Should(Equal(selectorValue)) Expect(job.Spec.Suspend).ShouldNot(BeNil()) Expect(*job.Spec.Suspend).Should(Equal(suspend)) + Expect(job.Spec.BackoffLimit).ShouldNot(BeNil()) + Expect(*job.Spec.BackoffLimit).Should(Equal(limit)) + Expect(job.Spec.TTLSecondsAfterFinished).ShouldNot(BeNil()) + Expect(*job.Spec.TTLSecondsAfterFinished).Should(Equal(ttl)) }) }) diff --git a/internal/controller/builder/builder_pdb.go b/internal/controller/builder/builder_pdb.go new file mode 100644 index 00000000000..d91fd5409ac --- /dev/null +++ b/internal/controller/builder/builder_pdb.go @@ -0,0 +1,71 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +type PDBBuilder struct { + BaseBuilder[policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudget, PDBBuilder] +} + +func NewPDBBuilder(namespace, name string) *PDBBuilder { + builder := &PDBBuilder{} + builder.init(namespace, name, &policyv1.PodDisruptionBudget{}, builder) + return builder +} + +func (builder *PDBBuilder) SetMinAvailable(minAvailable intstr.IntOrString) *PDBBuilder { + builder.get().Spec.MinAvailable = &minAvailable + return builder +} + +func (builder *PDBBuilder) AddSelector(key, value string) *PDBBuilder { + selector := builder.get().Spec.Selector + if selector == nil { + selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{}, + } + } + selector.MatchLabels[key] = value + builder.get().Spec.Selector = selector + return builder +} + +func (builder *PDBBuilder) AddSelectors(keyValues ...string) *PDBBuilder { + return builder.AddSelectorsInMap(WithMap(keyValues...)) +} + +func (builder *PDBBuilder) AddSelectorsInMap(keyValues map[string]string) *PDBBuilder { + selector := builder.get().Spec.Selector + if selector == nil { + selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{}, + } + } + for k, v := range keyValues { + selector.MatchLabels[k] = v + } + builder.get().Spec.Selector = selector + return builder +} diff --git a/internal/controller/builder/builder_pdb_test.go b/internal/controller/builder/builder_pdb_test.go new file mode 100644 index 00000000000..87709ab0f01 --- /dev/null +++ b/internal/controller/builder/builder_pdb_test.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at 
your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var _ = Describe("pdb builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + selectorKey1, selectorValue1 = "foo-1", "bar-1" + selectorKey2, selectorValue2 = "foo-2", "bar-2" + selectorKey3, selectorValue3 = "foo-3", "bar-3" + selectorKey4, selectorValue4 = "foo-4", "bar-4" + ) + selectors := map[string]string{selectorKey4: selectorValue4} + minAvailable := intstr.FromInt(3) + pdb := NewPDBBuilder(ns, name). + AddSelector(selectorKey1, selectorValue1). + AddSelectors(selectorKey2, selectorValue2, selectorKey3, selectorValue3). + AddSelectorsInMap(selectors). + SetMinAvailable(minAvailable). 
+ GetObject() + + Expect(pdb.Name).Should(Equal(name)) + Expect(pdb.Namespace).Should(Equal(ns)) + Expect(pdb.Spec.Selector).ShouldNot(BeNil()) + Expect(pdb.Spec.Selector.MatchLabels).ShouldNot(BeNil()) + Expect(pdb.Spec.Selector).ShouldNot(BeNil()) + Expect(pdb.Spec.Selector.MatchLabels).Should(HaveLen(4)) + Expect(pdb.Spec.Selector.MatchLabels[selectorKey1]).Should(Equal(selectorValue1)) + Expect(pdb.Spec.Selector.MatchLabels[selectorKey2]).Should(Equal(selectorValue2)) + Expect(pdb.Spec.Selector.MatchLabels[selectorKey3]).Should(Equal(selectorValue3)) + Expect(pdb.Spec.Selector.MatchLabels[selectorKey4]).Should(Equal(selectorValue4)) + Expect(pdb.Spec.MinAvailable).ShouldNot(BeNil()) + Expect(*pdb.Spec.MinAvailable).Should(Equal(minAvailable)) + }) +}) diff --git a/internal/controller/builder/builder_pod.go b/internal/controller/builder/builder_pod.go index 04b32f31ca6..39b4f9dfa35 100644 --- a/internal/controller/builder/builder_pod.go +++ b/internal/controller/builder/builder_pod.go @@ -42,3 +42,23 @@ func (builder *PodBuilder) AddContainer(container corev1.Container) *PodBuilder builder.get().Spec.Containers = containers return builder } + +func (builder *PodBuilder) AddVolumes(volumes ...corev1.Volume) *PodBuilder { + builder.get().Spec.Volumes = append(builder.get().Spec.Volumes, volumes...) + return builder +} + +func (builder *PodBuilder) SetRestartPolicy(policy corev1.RestartPolicy) *PodBuilder { + builder.get().Spec.RestartPolicy = policy + return builder +} + +func (builder *PodBuilder) SetSecurityContext(ctx corev1.PodSecurityContext) *PodBuilder { + builder.get().Spec.SecurityContext = &ctx + return builder +} + +func (builder *PodBuilder) AddTolerations(tolerations ...corev1.Toleration) *PodBuilder { + builder.get().Spec.Tolerations = append(builder.get().Spec.Tolerations, tolerations...) 
+ return builder +} diff --git a/internal/controller/builder/builder_pod_test.go b/internal/controller/builder/builder_pod_test.go index 402715bdfed..be7ecf6dbdc 100644 --- a/internal/controller/builder/builder_pod_test.go +++ b/internal/controller/builder/builder_pod_test.go @@ -31,33 +31,48 @@ var _ = Describe("pod builder", func() { name := "foo" ns := "default" port := int32(12345) - container := corev1.Container{ - Name: "foo-1", - Image: "bar-2", - Ports: []corev1.ContainerPort{ - { - Name: "foo-1", + container := *NewContainerBuilder("foo-1"). + SetImage("bar-1"). + AddPorts(corev1.ContainerPort{ + Name: "foo-1", + Protocol: corev1.ProtocolTCP, + ContainerPort: port, + }).GetObject() + containers := []corev1.Container{ + *NewContainerBuilder("foo-2").SetImage("bar-2"). + AddPorts(corev1.ContainerPort{ + Name: "foo-2", Protocol: corev1.ProtocolTCP, ContainerPort: port, + }).GetObject(), + } + volumes := []corev1.Volume{ + { + Name: "data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }, } - containers := []corev1.Container{ + restartPolicy := corev1.RestartPolicyOnFailure + user := int64(0) + ctx := corev1.PodSecurityContext{ + RunAsUser: &user, + } + tolerations := []corev1.Toleration{ { - Name: "foo-2", - Image: "bar-2", - Ports: []corev1.ContainerPort{ - { - Name: "foo-2", - Protocol: corev1.ProtocolTCP, - ContainerPort: port, - }, - }, + Key: "node", + Operator: corev1.TolerationOpEqual, + Value: "node-0", }, } pod := NewPodBuilder(ns, name). SetContainers(containers). AddContainer(container). + AddVolumes(volumes...). + SetRestartPolicy(restartPolicy). + SetSecurityContext(ctx). + AddTolerations(tolerations...). 
GetObject() Expect(pod.Name).Should(Equal(name)) @@ -65,5 +80,12 @@ var _ = Describe("pod builder", func() { Expect(pod.Spec.Containers).Should(HaveLen(2)) Expect(pod.Spec.Containers[0]).Should(Equal(containers[0])) Expect(pod.Spec.Containers[1]).Should(Equal(container)) + Expect(pod.Spec.Volumes).Should(HaveLen(1)) + Expect(pod.Spec.Volumes[0]).Should(Equal(volumes[0])) + Expect(pod.Spec.RestartPolicy).Should(Equal(restartPolicy)) + Expect(pod.Spec.SecurityContext).ShouldNot(BeNil()) + Expect(*pod.Spec.SecurityContext).Should(Equal(ctx)) + Expect(pod.Spec.Tolerations).Should(HaveLen(1)) + Expect(pod.Spec.Tolerations[0]).Should(Equal(tolerations[0])) }) }) diff --git a/internal/controller/builder/builder_pvc.go b/internal/controller/builder/builder_pvc.go index 0f2f6cb79ff..9b4c82fecbe 100644 --- a/internal/controller/builder/builder_pvc.go +++ b/internal/controller/builder/builder_pvc.go @@ -35,3 +35,18 @@ func (builder *PVCBuilder) SetResources(resources corev1.ResourceRequirements) * builder.get().Spec.Resources = resources return builder } + +func (builder *PVCBuilder) SetAccessModes(accessModes []corev1.PersistentVolumeAccessMode) *PVCBuilder { + builder.get().Spec.AccessModes = accessModes + return builder +} + +func (builder *PVCBuilder) SetStorageClass(sc string) *PVCBuilder { + builder.get().Spec.StorageClassName = &sc + return builder +} + +func (builder *PVCBuilder) SetDataSource(dataSource corev1.TypedLocalObjectReference) *PVCBuilder { + builder.get().Spec.DataSource = &dataSource + return builder +} diff --git a/internal/controller/builder/builder_pvc_test.go b/internal/controller/builder/builder_pvc_test.go index 518fe0b1e5b..2ee9585d01c 100644 --- a/internal/controller/builder/builder_pvc_test.go +++ b/internal/controller/builder/builder_pvc_test.go @@ -38,12 +38,30 @@ var _ = Describe("pvc builder", func() { "CPU": resource.MustParse("500m"), }, } + accessModes := []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + } + sc := 
"openebs-local-pv" + apiGroup := "apps.kubeblocks.io/v1alpha1" + dataSource := corev1.TypedLocalObjectReference{ + APIGroup: &apiGroup, + Kind: "Backup", + Name: "cluster-component-backup", + } pvc := NewPVCBuilder(ns, name). SetResources(resources). + SetAccessModes(accessModes). + SetStorageClass(sc). + SetDataSource(dataSource). GetObject() Expect(pvc.Name).Should(Equal(name)) Expect(pvc.Namespace).Should(Equal(ns)) Expect(pvc.Spec.Resources).Should(Equal(resources)) + Expect(pvc.Spec.AccessModes).Should(Equal(accessModes)) + Expect(pvc.Spec.StorageClassName).ShouldNot(BeNil()) + Expect(*pvc.Spec.StorageClassName).Should(Equal(sc)) + Expect(pvc.Spec.DataSource).ShouldNot(BeNil()) + Expect(*pvc.Spec.DataSource).Should(Equal(dataSource)) }) }) diff --git a/internal/controller/builder/builder_role_binding.go b/internal/controller/builder/builder_role_binding.go new file mode 100644 index 00000000000..2a47997dfc5 --- /dev/null +++ b/internal/controller/builder/builder_role_binding.go @@ -0,0 +1,44 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + rbacv1 "k8s.io/api/rbac/v1" +) + +type RoleBindingBuilder struct { + BaseBuilder[rbacv1.RoleBinding, *rbacv1.RoleBinding, RoleBindingBuilder] +} + +func NewRoleBindingBuilder(namespace, name string) *RoleBindingBuilder { + builder := &RoleBindingBuilder{} + builder.init(namespace, name, &rbacv1.RoleBinding{}, builder) + return builder +} + +func (builder *RoleBindingBuilder) SetRoleRef(roleRef rbacv1.RoleRef) *RoleBindingBuilder { + builder.get().RoleRef = roleRef + return builder +} + +func (builder *RoleBindingBuilder) AddSubjects(subjects ...rbacv1.Subject) *RoleBindingBuilder { + builder.get().Subjects = append(builder.get().Subjects, subjects...) + return builder +} diff --git a/internal/controller/builder/builder_role_binding_test.go b/internal/controller/builder/builder_role_binding_test.go new file mode 100644 index 00000000000..b621bbcccf1 --- /dev/null +++ b/internal/controller/builder/builder_role_binding_test.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/apecloud/kubeblocks/internal/constant" +) + +var _ = Describe("role binding builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + roleRef := rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: constant.RBACRoleName, + } + subject := rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: ns, + Name: fmt.Sprintf("kb-%s", name), + } + roleBinding := NewRoleBindingBuilder(ns, name). + SetRoleRef(roleRef). + AddSubjects(subject). + GetObject() + + Expect(roleBinding.Name).Should(Equal(name)) + Expect(roleBinding.Namespace).Should(Equal(ns)) + Expect(roleBinding.RoleRef).Should(Equal(roleRef)) + Expect(roleBinding.Subjects).Should(HaveLen(1)) + Expect(roleBinding.Subjects[0]).Should(Equal(subject)) + }) +}) diff --git a/internal/controller/builder/builder_service_account.go b/internal/controller/builder/builder_service_account.go new file mode 100644 index 00000000000..070e7606f64 --- /dev/null +++ b/internal/controller/builder/builder_service_account.go @@ -0,0 +1,34 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + corev1 "k8s.io/api/core/v1" +) + +type ServiceAccountBuilder struct { + BaseBuilder[corev1.ServiceAccount, *corev1.ServiceAccount, ServiceAccountBuilder] +} + +func NewServiceAccountBuilder(namespace, name string) *ServiceAccountBuilder { + builder := &ServiceAccountBuilder{} + builder.init(namespace, name, &corev1.ServiceAccount{}, builder) + return builder +} diff --git a/internal/controller/builder/builder_service_account_test.go b/internal/controller/builder/builder_service_account_test.go new file mode 100644 index 00000000000..3fc6809644a --- /dev/null +++ b/internal/controller/builder/builder_service_account_test.go @@ -0,0 +1,39 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("service account builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + sa := NewServiceAccountBuilder(ns, name). 
+ GetObject() + + Expect(sa.Name).Should(Equal(name)) + Expect(sa.Namespace).Should(Equal(ns)) + }) +}) diff --git a/internal/controller/builder/builder_volume_snapshot_class.go b/internal/controller/builder/builder_volume_snapshot_class.go new file mode 100644 index 00000000000..6224174c60b --- /dev/null +++ b/internal/controller/builder/builder_volume_snapshot_class.go @@ -0,0 +1,44 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" +) + +type VolumeSnapshotClassBuilder struct { + BaseBuilder[snapshotv1.VolumeSnapshotClass, *snapshotv1.VolumeSnapshotClass, VolumeSnapshotClassBuilder] +} + +func NewVolumeSnapshotClassBuilder(namespace, name string) *VolumeSnapshotClassBuilder { + builder := &VolumeSnapshotClassBuilder{} + builder.init(namespace, name, &snapshotv1.VolumeSnapshotClass{}, builder) + return builder +} + +func (builder *VolumeSnapshotClassBuilder) SetDriver(driver string) *VolumeSnapshotClassBuilder { + builder.get().Driver = driver + return builder +} + +func (builder *VolumeSnapshotClassBuilder) SetDeletionPolicy(policy snapshotv1.DeletionPolicy) *VolumeSnapshotClassBuilder { + builder.get().DeletionPolicy = policy + return builder +} diff --git a/internal/controller/builder/builder_volume_snapshot_class_test.go b/internal/controller/builder/builder_volume_snapshot_class_test.go new file mode 100644 index 00000000000..e00aa618405 --- /dev/null +++ b/internal/controller/builder/builder_volume_snapshot_class_test.go @@ -0,0 +1,48 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" +) + +var _ = Describe("volume snapshot class builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + + driver := "openebs-snapshot" + policy := snapshotv1.VolumeSnapshotContentRetain + vsc := NewVolumeSnapshotClassBuilder(ns, name). + SetDriver(driver). + SetDeletionPolicy(policy). + GetObject() + + Expect(vsc.Name).Should(Equal(name)) + Expect(vsc.Namespace).Should(Equal(ns)) + Expect(vsc.Driver).Should(Equal(driver)) + Expect(vsc.DeletionPolicy).Should(Equal(policy)) + }) +}) diff --git a/internal/controller/component/cue/probe_template.cue b/internal/controller/component/cue/probe_template.cue deleted file mode 100644 index 0eae7891988..00000000000 --- a/internal/controller/component/cue/probe_template.cue +++ /dev/null @@ -1,53 +0,0 @@ -//Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -//This file is part of KubeBlocks project -// -//This program is free software: you can redistribute it and/or modify -//it under the terms of the GNU Affero General Public License as published by -//the Free Software Foundation, either version 3 of the License, or -//(at your option) any later version. -// -//This program is distributed in the hope that it will be useful -//but WITHOUT ANY WARRANTY; without even the implied warranty of -//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -//GNU Affero General Public License for more details. -// -//You should have received a copy of the GNU Affero General Public License -//along with this program. If not, see . 
- -probeContainer: { - image: "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6" - command: ["/pause"] - imagePullPolicy: "IfNotPresent" - name: "string" - "env": [ - { - "name": "KB_SERVICE_USER" - "valueFrom": { - "secretKeyRef": { - "key": "username" - "name": "$(CONN_CREDENTIAL_SECRET_NAME)" - } - } - }, - { - "name": "KB_SERVICE_PASSWORD" - "valueFrom": { - "secretKeyRef": { - "key": "password" - "name": "$(CONN_CREDENTIAL_SECRET_NAME)" - } - } - }, - ] - readinessProbe: { - exec: { - command: [] - } - } - startupProbe: { - tcpSocket: { - port: 3501 - } - } -} diff --git a/internal/controller/component/probe_utils.go b/internal/controller/component/probe_utils.go index f23fe2cd3ce..6c368a725b8 100644 --- a/internal/controller/component/probe_utils.go +++ b/internal/controller/component/probe_utils.go @@ -20,18 +20,17 @@ along with this program. If not, see . package component import ( - "embed" "encoding/json" "fmt" "strconv" "strings" - "github.com/leaanthony/debme" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -48,9 +47,6 @@ const ( ) var ( - //go:embed cue/* - cueTemplates embed.FS - // default probe setting for volume protection. 
defaultVolumeProtectionProbe = appsv1alpha1.ClusterDefinitionProbe{ PeriodSeconds: 60, @@ -60,11 +56,7 @@ var ( ) func buildProbeContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedComponent) error { - container, err := buildProbeContainer() - if err != nil { - return err - } - + container := buildProbeContainer() probeContainers := []corev1.Container{} componentProbes := component.Probes if componentProbes == nil { @@ -117,23 +109,37 @@ func buildProbeContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedC return nil } -func buildProbeContainer() (*corev1.Container, error) { - cueFS, _ := debme.FS(cueTemplates, "cue") - - cueTpl, err := intctrlutil.NewCUETplFromBytes(cueFS.ReadFile("probe_template.cue")) - if err != nil { - return nil, err - } - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - probeContainerByte, err := cueValue.Lookup("probeContainer") - if err != nil { - return nil, err - } - container := &corev1.Container{} - if err = json.Unmarshal(probeContainerByte, container); err != nil { - return nil, err - } - return container, nil +func buildProbeContainer() *corev1.Container { + return builder.NewContainerBuilder("string"). + SetImage("registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"). + SetImagePullPolicy(corev1.PullIfNotPresent). + AddCommands("/pause"). + AddEnv(corev1.EnvVar{ + Name: "KB_SERVICE_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "username", + LocalObjectReference: corev1.LocalObjectReference{Name: "$(CONN_CREDENTIAL_SECRET_NAME)"}, + }, + }}, + corev1.EnvVar{ + Name: "KB_SERVICE_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "password", + LocalObjectReference: corev1.LocalObjectReference{Name: "$(CONN_CREDENTIAL_SECRET_NAME)"}, + }, + }, + }). + SetReadinessProbe(corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{Command: []string{}}, + }}). 
+ SetStartupProbe(corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(3501)}, + }}). + GetObject() } func buildProbeServiceContainer(component *SynthesizedComponent, container *corev1.Container, probeSvcHTTPPort int, probeSvcGRPCPort int) { diff --git a/internal/controller/component/probe_utils_test.go b/internal/controller/component/probe_utils_test.go index 737083bf40e..64315a5fecd 100644 --- a/internal/controller/component/probe_utils_test.go +++ b/internal/controller/component/probe_utils_test.go @@ -44,9 +44,7 @@ var _ = Describe("probe_utils", func() { var clusterDefProbe *appsv1alpha1.ClusterDefinitionProbe BeforeEach(func() { - var err error - container, err = buildProbeContainer() - Expect(err).NotTo(HaveOccurred()) + container = buildProbeContainer() probeServiceHTTPPort, probeServiceGrpcPort = 3501, 50001 clusterDefProbe = &appsv1alpha1.ClusterDefinitionProbe{} diff --git a/internal/controller/configuration/operator_test.go b/internal/controller/configuration/operator_test.go index 23027af26eb..05b286c13e4 100644 --- a/internal/controller/configuration/operator_test.go +++ b/internal/controller/configuration/operator_test.go @@ -53,8 +53,7 @@ var _ = Describe("ConfigurationOperatorTest", func() { var k8sMockClient *testutil.K8sClientMockHelper mockStatefulSet := func() *appsv1.StatefulSet { - envConfig, err := factory.BuildEnvConfig(clusterObj, clusterComponent) - Expect(err).Should(Succeed()) + envConfig := factory.BuildEnvConfig(clusterObj, clusterComponent) stsObj, err := factory.BuildSts(intctrlutil.RequestCtx{ Ctx: ctx, Log: logger, diff --git a/internal/controller/configuration/pipeline_test.go b/internal/controller/configuration/pipeline_test.go index b24974ffa1e..3f2b8475020 100644 --- a/internal/controller/configuration/pipeline_test.go +++ b/internal/controller/configuration/pipeline_test.go @@ -57,8 +57,7 @@ var _ = Describe("ConfigurationPipelineTest", func() { var k8sMockClient 
*testutil.K8sClientMockHelper mockStatefulSet := func() *appsv1.StatefulSet { - envConfig, err := factory.BuildEnvConfig(clusterObj, clusterComponent) - Expect(err).Should(Succeed()) + envConfig := factory.BuildEnvConfig(clusterObj, clusterComponent) stsObj, err := factory.BuildSts(intctrlutil.RequestCtx{ Ctx: ctx, Log: logger, diff --git a/internal/controller/configuration/template_wrapper.go b/internal/controller/configuration/template_wrapper.go index 8f82e94761b..8709e761c57 100644 --- a/internal/controller/configuration/template_wrapper.go +++ b/internal/controller/configuration/template_wrapper.go @@ -309,7 +309,7 @@ func generateConfigMapFromTpl(cluster *appsv1alpha1.Cluster, } // Using ConfigMap cue template render to configmap of config - return factory.BuildConfigMapWithTemplate(cluster, component, configs, cmName, configConstraintName, templateSpec) + return factory.BuildConfigMapWithTemplate(cluster, component, configs, cmName, templateSpec), nil } // renderConfigMapTemplate renders config file using template engine diff --git a/internal/controller/factory/builder.go b/internal/controller/factory/builder.go index 4a03a8aac58..716627c19e1 100644 --- a/internal/controller/factory/builder.go +++ b/internal/controller/factory/builder.go @@ -39,6 +39,7 @@ import ( policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/rand" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -247,10 +248,7 @@ func BuildPersistentVolumeClaimLabels(component *component.SynthesizedComponent, func BuildSvcListWithCustomAttributes(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, customAttributeSetter func(*corev1.Service)) ([]*corev1.Service, error) { - services, err := BuildSvcList(cluster, component) - if err != nil { - return nil, err - } + services := BuildSvcList(cluster, component) if customAttributeSetter != nil { for _, svc := 
range services { customAttributeSetter(svc) @@ -259,36 +257,81 @@ func BuildSvcListWithCustomAttributes(cluster *appsv1alpha1.Cluster, component * return services, nil } -func BuildSvcList(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) ([]*corev1.Service, error) { - const tplFile = "service_template.cue" +func BuildSvcList(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) []*corev1.Service { + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName + selectors := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + delete(selectors, constant.AppNameLabelKey) var result = make([]*corev1.Service, 0) for _, item := range component.Services { if len(item.Spec.Ports) == 0 { continue } - svc := corev1.Service{} - if err := buildFromCUE(tplFile, map[string]any{ - "cluster": cluster, - "service": item, - "component": component, - }, "svc", &svc); err != nil { - return nil, err + name := fmt.Sprintf("%s-%s", cluster.Name, component.Name) + if len(item.Name) > 0 { + name = fmt.Sprintf("%s-%s-%s", cluster.Name, component.Name, item.Name) } - result = append(result, &svc) + + svcBuilder := builder.NewServiceBuilder(cluster.Namespace, name). + AddLabelsInMap(wellKnownLabels). + AddAnnotationsInMap(item.Annotations). + AddSelectorsInMap(selectors). + AddPorts(item.Spec.Ports...) 
+ if len(item.Spec.Type) > 0 { + svcBuilder.SetType(item.Spec.Type) + } + svc := svcBuilder.GetObject() + result = append(result, svc) } - return result, nil + return result } -func BuildHeadlessSvc(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) (*corev1.Service, error) { - const tplFile = "headless_service_template.cue" - service := corev1.Service{} - if err := buildFromCUE(tplFile, map[string]any{ - "cluster": cluster, - "component": component, - }, "service", &service); err != nil { - return nil, err - } - return &service, nil +func BuildHeadlessSvc(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) *corev1.Service { + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName + monitorAnnotations := func() map[string]string { + annotations := make(map[string]string, 0) + falseStr := "false" + trueStr := "true" + switch { + case !component.Monitor.Enable: + annotations["monitor.kubeblocks.io/scrape"] = falseStr + annotations["monitor.kubeblocks.io/agamotto"] = falseStr + case component.Monitor.BuiltIn: + annotations["monitor.kubeblocks.io/scrape"] = falseStr + annotations["monitor.kubeblocks.io/agamotto"] = trueStr + default: + annotations["monitor.kubeblocks.io/scrape"] = trueStr + annotations["monitor.kubeblocks.io/path"] = component.Monitor.ScrapePath + annotations["monitor.kubeblocks.io/port"] = strconv.Itoa(int(component.Monitor.ScrapePort)) + annotations["monitor.kubeblocks.io/scheme"] = "http" + annotations["monitor.kubeblocks.io/agamotto"] = falseStr + } + return annotations + }() + servicePorts := func() []corev1.ServicePort { + var servicePorts []corev1.ServicePort + for _, container := range component.PodSpec.Containers { + for _, port := range container.Ports { + servicePort := corev1.ServicePort{ + Name: port.Name, + Protocol: port.Protocol, + Port: port.ContainerPort, + TargetPort: 
intstr.FromString(port.Name), + } + servicePorts = append(servicePorts, servicePort) + } + } + return servicePorts + }() + return builder.NewHeadlessServiceBuilder(cluster.Namespace, fmt.Sprintf("%s-%s-headless", cluster.Name, component.Name)). + AddLabelsInMap(wellKnownLabels). + AddAnnotationsInMap(monitorAnnotations). + AddSelector(constant.AppInstanceLabelKey, cluster.Name). + AddSelector(constant.AppManagedByLabelKey, constant.AppName). + AddSelector(constant.KBAppComponentLabelKey, component.Name). + AddPorts(servicePorts...). + GetObject() } func BuildSts(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, @@ -352,6 +395,15 @@ func BuildSts(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, return sts, nil } +func buildWellKnownLabels(clusterDefName, clusterName, componentName string) map[string]string { + return map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppNameLabelKey: clusterDefName, + constant.AppInstanceLabelKey: clusterName, + constant.KBAppComponentLabelKey: componentName, + } +} + func BuildRSM(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, envConfigName string) (*workloads.ReplicatedStateMachine, error) { vctToPVC := func(vct corev1.PersistentVolumeClaimTemplate) corev1.PersistentVolumeClaim { @@ -361,12 +413,7 @@ func BuildRSM(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, } } - commonLabels := map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppNameLabelKey: component.ClusterDefName, - constant.AppInstanceLabelKey: cluster.Name, - constant.KBAppComponentLabelKey: component.Name, - } + commonLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) addCommonLabels := func(service *corev1.Service) { if service == nil { return @@ -743,19 +790,19 @@ func randomString(length int) string { } func BuildConnCredential(clusterDefinition 
*appsv1alpha1.ClusterDefinition, cluster *appsv1alpha1.Cluster, - component *component.SynthesizedComponent) (*corev1.Secret, error) { - const tplFile = "conn_credential_template.cue" - - connCredential := corev1.Secret{} - if err := buildFromCUE(tplFile, map[string]any{ - "clusterdefinition": clusterDefinition, - "cluster": cluster, - }, "secret", &connCredential); err != nil { - return nil, err + component *component.SynthesizedComponent) *corev1.Secret { + wellKnownLabels := buildWellKnownLabels(clusterDefinition.Name, cluster.Name, "") + delete(wellKnownLabels, constant.KBAppComponentLabelKey) + credentialBuilder := builder.NewSecretBuilder(cluster.Namespace, fmt.Sprintf("%s-conn-credential", cluster.Name)). + AddLabelsInMap(wellKnownLabels). + SetStringData(clusterDefinition.Spec.ConnectionCredential) + if len(clusterDefinition.Spec.Type) > 0 { + credentialBuilder.AddLabels("apps.kubeblocks.io/cluster-type", clusterDefinition.Spec.Type) } + connCredential := credentialBuilder.GetObject() if len(connCredential.StringData) == 0 { - return &connCredential, nil + return connCredential } replaceVarObjects := func(k, v *string, i int, origValue string, varObjectsMap map[string]string) { @@ -821,19 +868,16 @@ func BuildConnCredential(clusterDefinition *appsv1alpha1.ClusterDefinition, clus m[fmt.Sprintf("$(CONN_CREDENTIAL).%s", k)] = v } replaceData(m) - return &connCredential, nil + return connCredential } -func BuildPDB(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) (*policyv1.PodDisruptionBudget, error) { - const tplFile = "pdb_template.cue" - pdb := policyv1.PodDisruptionBudget{} - if err := buildFromCUE(tplFile, map[string]any{ - "cluster": cluster, - "component": component, - }, "pdb", &pdb); err != nil { - return nil, err - } - return &pdb, nil +func BuildPDB(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) *policyv1.PodDisruptionBudget { + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, 
cluster.Name, component.Name) + return builder.NewPDBBuilder(cluster.Namespace, fmt.Sprintf("%s-%s", cluster.Name, component.Name)). + AddLabelsInMap(wellKnownLabels). + AddLabels(constant.AppComponentLabelKey, component.CompDefName). + AddSelectorsInMap(wellKnownLabels). + GetObject() } func BuildDeploy(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, envConfigName string) (*appsv1.Deployment, error) { @@ -859,27 +903,33 @@ func BuildPVC(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, vct *corev1.PersistentVolumeClaimTemplate, pvcKey types.NamespacedName, - snapshotName string) (*corev1.PersistentVolumeClaim, error) { - pvc := corev1.PersistentVolumeClaim{} - if err := buildFromCUE("pvc_template.cue", map[string]any{ - "cluster": cluster, - "component": component, - "volumeClaimTemplate": vct, - "pvc_key": pvcKey, - "snapshot_name": snapshotName, - }, "pvc", &pvc); err != nil { - return nil, err + snapshotName string) *corev1.PersistentVolumeClaim { + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + pvcBuilder := builder.NewPVCBuilder(pvcKey.Namespace, pvcKey.Name). + AddLabelsInMap(wellKnownLabels). + AddLabels(constant.VolumeClaimTemplateNameLabelKey, vct.Name). + SetAccessModes(vct.Spec.AccessModes). 
+ SetResources(vct.Spec.Resources) + if vct.Spec.StorageClassName != nil { + pvcBuilder.SetStorageClass(*vct.Spec.StorageClassName) + } + if len(snapshotName) > 0 { + apiGroup := "snapshot.storage.k8s.io" + pvcBuilder.SetDataSource(corev1.TypedLocalObjectReference{ + APIGroup: &apiGroup, + Kind: "VolumeSnapshot", + Name: snapshotName, + }) } - BuildPersistentVolumeClaimLabels(component, &pvc, vct.Name) - return &pvc, nil + pvc := pvcBuilder.GetObject() + BuildPersistentVolumeClaimLabels(component, pvc, vct.Name) + return pvc } // BuildEnvConfig builds cluster component context ConfigMap object, which is to be used in workload container's // envFrom.configMapRef with name of "$(cluster.metadata.name)-$(component.name)-env" pattern. -func BuildEnvConfig(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) (*corev1.ConfigMap, error) { - const tplFile = "env_config_template.cue" +func BuildEnvConfig(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) *corev1.ConfigMap { envData := map[string]string{} - // add component envs if component.ComponentRefEnvs != nil { for _, env := range component.ComponentRefEnvs { @@ -887,167 +937,163 @@ func BuildEnvConfig(cluster *appsv1alpha1.Cluster, component *component.Synthesi } } - config := corev1.ConfigMap{} - if err := buildFromCUE(tplFile, map[string]any{ - "cluster": cluster, - "component": component, - "config.data": envData, - }, "config", &config); err != nil { - return nil, err - } - return &config, nil + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName + return builder.NewConfigMapBuilder(cluster.Namespace, fmt.Sprintf("%s-%s-env", cluster.Name, component.Name)). + AddLabelsInMap(wellKnownLabels). + AddLabels(constant.AppConfigTypeLabelKey, "kubeblocks-env"). + SetData(envData). 
+ GetObject() } func BuildBackup(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, backupPolicyName string, backupKey types.NamespacedName, - backupType string) (*dataprotectionv1alpha1.Backup, error) { - backup := dataprotectionv1alpha1.Backup{} - if err := buildFromCUE("backup_job_template.cue", map[string]any{ - "cluster": cluster, - "component": component, - "backupPolicyName": backupPolicyName, - "backupJobKey": backupKey, - "backupType": backupType, - }, "backupJob", &backup); err != nil { - return nil, err - } - return &backup, nil + backupType string) *dataprotectionv1alpha1.Backup { + return builder.NewBackupBuilder(backupKey.Namespace, backupKey.Name). + AddLabels(constant.BackupTypeLabelKeyKey, backupType). + AddLabels(constant.KBManagedByKey, "cluster"). + AddLabels("backuppolicies.dataprotection.kubeblocks.io/name", backupPolicyName). + AddLabels(constant.AppNameLabelKey, component.ClusterDefName). + AddLabels(constant.AppInstanceLabelKey, cluster.Name). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). + AddLabels(constant.KBAppComponentLabelKey, component.Name). + SetBackupPolicyName(backupPolicyName). + SetBackType(dataprotectionv1alpha1.BackupType(backupType)). 
+ GetObject() } func BuildConfigMapWithTemplate(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, configs map[string]string, cmName string, - configConstraintName string, - configTemplateSpec appsv1alpha1.ComponentTemplateSpec) (*corev1.ConfigMap, error) { - const tplFile = "config_template.cue" - cueFS, _ := debme.FS(cueTemplates, "cue") - cueTpl, err := getCacheCUETplValue(tplFile, func() (*intctrlutil.CUETpl, error) { - return intctrlutil.NewCUETplFromBytes(cueFS.ReadFile(tplFile)) - }) - if err != nil { - return nil, err - } - - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - // prepare cue data - configMeta := map[string]map[string]string{ - "clusterDefinition": { - "name": cluster.Spec.ClusterDefRef, - }, - "cluster": { - "name": cluster.GetName(), - "namespace": cluster.GetNamespace(), - }, - "component": { - "name": component.Name, - "compDefName": component.CompDefName, - "characterType": component.CharacterType, - "configName": cmName, - "templateName": configTemplateSpec.TemplateRef, - "configConstraintsName": configConstraintName, - "configTemplateName": configTemplateSpec.Name, - }, - } - configBytes, err := json.Marshal(configMeta) - if err != nil { - return nil, err - } - - // Generate config files context by rendering cue template - if err = cueValue.Fill("meta", configBytes); err != nil { - return nil, err - } - - configStrByte, err := cueValue.Lookup("config") - if err != nil { - return nil, err - } - - cm := corev1.ConfigMap{} - if err = json.Unmarshal(configStrByte, &cm); err != nil { - return nil, err - } - - // Update rendered config - cm.Data = configs - return &cm, nil + configTemplateSpec appsv1alpha1.ComponentTemplateSpec) *corev1.ConfigMap { + wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) + wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName + return builder.NewConfigMapBuilder(cluster.Namespace, cmName). + AddLabelsInMap(wellKnownLabels). 
+ AddLabels(constant.CMConfigurationTypeLabelKey, constant.ConfigInstanceType). + AddLabels(constant.CMTemplateNameLabelKey, configTemplateSpec.TemplateRef). + AddAnnotations(constant.DisableUpgradeInsConfigurationAnnotationKey, strconv.FormatBool(false)). + SetData(configs). + GetObject() } func BuildCfgManagerContainer(sidecarRenderedParam *cfgcm.CfgManagerBuildParams, component *component.SynthesizedComponent) (*corev1.Container, error) { - const tplFile = "config_manager_sidecar.cue" - cueFS, _ := debme.FS(cueTemplates, "cue") - cueTpl, err := getCacheCUETplValue(tplFile, func() (*intctrlutil.CUETpl, error) { - return intctrlutil.NewCUETplFromBytes(cueFS.ReadFile(tplFile)) + var env []corev1.EnvVar + env = append(env, corev1.EnvVar{ + Name: "CONFIG_MANAGER_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.podIP", + }, + }, }) - if err != nil { - return nil, err - } - - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - paramBytes, err := json.Marshal(sidecarRenderedParam) - if err != nil { - return nil, err - } - - if err = cueValue.Fill("parameter", paramBytes); err != nil { - return nil, err - } - - containerStrByte, err := cueValue.Lookup("template") - if err != nil { - return nil, err + if len(sidecarRenderedParam.CharacterType) > 0 { + env = append(env, corev1.EnvVar{ + Name: "DB_TYPE", + Value: sidecarRenderedParam.CharacterType, + }) } - container := corev1.Container{} - if err = json.Unmarshal(containerStrByte, &container); err != nil { - return nil, err + if sidecarRenderedParam.CharacterType == "mysql" { + env = append(env, corev1.EnvVar{ + Name: "MYSQL_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "username", + LocalObjectReference: corev1.LocalObjectReference{Name: sidecarRenderedParam.SecreteName}, + }, + }, + }, + corev1.EnvVar{ + Name: "MYSQL_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: 
&corev1.SecretKeySelector{ + Key: "password", + LocalObjectReference: corev1.LocalObjectReference{Name: sidecarRenderedParam.SecreteName}, + }, + }, + }, + corev1.EnvVar{ + Name: "DATA_SOURCE_NAME", + Value: "$(MYSQL_USER):$(MYSQL_PASSWORD)@(localhost:3306)/", + }, + ) + } + containerBuilder := builder.NewContainerBuilder(sidecarRenderedParam.ManagerName). + AddCommands("env"). + AddArgs("PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$(TOOLS_PATH)"). + AddArgs("/bin/reloader"). + AddArgs(sidecarRenderedParam.Args...). + AddEnv(env...). + SetImage(sidecarRenderedParam.Image). + SetImagePullPolicy(corev1.PullIfNotPresent). + AddVolumeMounts(sidecarRenderedParam.Volumes...) + if sidecarRenderedParam.ShareProcessNamespace { + user := int64(0) + containerBuilder.SetSecurityContext(corev1.SecurityContext{ + RunAsUser: &user, + }) } + container := containerBuilder.GetObject() - if err := injectEnvs(sidecarRenderedParam.Cluster, component, sidecarRenderedParam.EnvConfigName, &container); err != nil { + if err := injectEnvs(sidecarRenderedParam.Cluster, component, sidecarRenderedParam.EnvConfigName, container); err != nil { return nil, err } - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - return &container, nil + intctrlutil.InjectZeroResourcesLimitsIfEmpty(container) + return container, nil } -func BuildBackupManifestsJob(key types.NamespacedName, backup *dataprotectionv1alpha1.Backup, podSpec *corev1.PodSpec) (*batchv1.Job, error) { - const tplFile = "backup_manifests_template.cue" - job := &batchv1.Job{} - if err := buildFromCUE(tplFile, - map[string]any{ - "job.metadata.name": key.Name, - "job.metadata.namespace": key.Namespace, - "backup": backup, - "podSpec": podSpec, - }, - "job", job); err != nil { - return nil, err - } - return job, nil +func BuildBackupManifestsJob(key types.NamespacedName, backup *dataprotectionv1alpha1.Backup, podSpec *corev1.PodSpec) *batchv1.Job { + spec := podSpec.DeepCopy() + spec.RestartPolicy = 
corev1.RestartPolicyNever + ctx := spec.SecurityContext + if ctx == nil { + ctx = &corev1.PodSecurityContext{} + } + user := int64(0) + ctx.RunAsUser = &user + spec.SecurityContext = ctx + return builder.NewJobBuilder(key.Namespace, key.Name). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). + SetPodTemplateSpec(corev1.PodTemplateSpec{Spec: *spec}). + SetBackoffLimit(3). + SetTTLSecondsAfterFinished(10). + GetObject() } func BuildRestoreJob(cluster *appsv1alpha1.Cluster, synthesizedComponent *component.SynthesizedComponent, name, image string, command []string, volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, env []corev1.EnvVar, resources *corev1.ResourceRequirements) (*batchv1.Job, error) { - const tplFile = "restore_job_template.cue" - job := &batchv1.Job{} - fillMaps := map[string]any{ - "job.metadata.name": name, - "job.metadata.namespace": cluster.Namespace, - "job.spec.template.spec.volumes": volumes, - "container.image": image, - "container.command": command, - "container.volumeMounts": volumeMounts, - "container.env": env, - } + containerBuilder := builder.NewContainerBuilder("restore"). + SetImage(image). + SetImagePullPolicy(corev1.PullIfNotPresent). + AddCommands(command...). + AddVolumeMounts(volumeMounts...). + AddEnv(env...) if resources != nil { - fillMaps["container.resources"] = *resources + containerBuilder.SetResources(*resources) + } + container := containerBuilder.GetObject() + + ctx := corev1.PodSecurityContext{} + user := int64(0) + ctx.RunAsUser = &user + pod := builder.NewPodBuilder(cluster.Namespace, ""). + AddContainer(*container). + AddVolumes(volumes...). + SetRestartPolicy(corev1.RestartPolicyOnFailure). + SetSecurityContext(ctx). + GetObject() + template := corev1.PodTemplateSpec{ + Spec: pod.Spec, } - if err := buildFromCUE(tplFile, fillMaps, "job", job); err != nil { - return nil, err - } + job := builder.NewJobBuilder(cluster.Namespace, name). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). 
+ SetPodTemplateSpec(template). + GetObject() containers := job.Spec.Template.Spec.Containers if len(containers) > 0 { if err := injectEnvs(cluster, synthesizedComponent, "", &containers[0]); err != nil { @@ -1066,16 +1112,14 @@ func BuildRestoreJob(cluster *appsv1alpha1.Cluster, synthesizedComponent *compon func BuildCfgManagerToolsContainer(sidecarRenderedParam *cfgcm.CfgManagerBuildParams, component *component.SynthesizedComponent, toolsMetas []appsv1alpha1.ToolConfig, toolsMap map[string]cfgcm.ConfigSpecMeta) ([]corev1.Container, error) { toolContainers := make([]corev1.Container, 0, len(toolsMetas)) for _, toolConfig := range toolsMetas { - toolContainer := corev1.Container{ - Name: toolConfig.Name, - Command: toolConfig.Command, - ImagePullPolicy: corev1.PullIfNotPresent, - VolumeMounts: sidecarRenderedParam.Volumes, - } - if toolConfig.Image != "" { - toolContainer.Image = toolConfig.Image + toolContainerBuilder := builder.NewContainerBuilder(toolConfig.Name). + AddCommands(toolConfig.Command...). + SetImagePullPolicy(corev1.PullIfNotPresent). + AddVolumeMounts(sidecarRenderedParam.Volumes...) 
+ if len(toolConfig.Image) > 0 { + toolContainerBuilder.SetImage(toolConfig.Image) } - toolContainers = append(toolContainers, toolContainer) + toolContainers = append(toolContainers, *toolContainerBuilder.GetObject()) } for i := range toolContainers { container := &toolContainers[i] @@ -1097,39 +1141,54 @@ func setToolsScriptsPath(container *corev1.Container, meta cfgcm.ConfigSpecMeta) }) } -func BuildVolumeSnapshotClass(name string, driver string) (*snapshotv1.VolumeSnapshotClass, error) { - const tplFile = "volumesnapshotclass.cue" - vsc := &snapshotv1.VolumeSnapshotClass{} - if err := buildFromCUE(tplFile, - map[string]any{ - "class.metadata.name": name, - "class.driver": driver, - }, - "class", vsc); err != nil { - return nil, err - } - return vsc, nil -} - -func BuildServiceAccount(cluster *appsv1alpha1.Cluster) (*corev1.ServiceAccount, error) { - return buildRBACObject[corev1.ServiceAccount](cluster, "serviceaccount") +func BuildVolumeSnapshotClass(name string, driver string) *snapshotv1.VolumeSnapshotClass { + return builder.NewVolumeSnapshotClassBuilder("", name). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). + SetDriver(driver). + SetDeletionPolicy(snapshotv1.VolumeSnapshotContentDelete). + GetObject() } -func BuildRoleBinding(cluster *appsv1alpha1.Cluster) (*rbacv1.RoleBinding, error) { - return buildRBACObject[rbacv1.RoleBinding](cluster, "rolebinding") +func BuildServiceAccount(cluster *appsv1alpha1.Cluster) *corev1.ServiceAccount { + wellKnownLabels := buildWellKnownLabels(cluster.Spec.ClusterDefRef, cluster.Name, "") + delete(wellKnownLabels, constant.KBAppComponentLabelKey) + return builder.NewServiceAccountBuilder(cluster.Namespace, fmt.Sprintf("kb-%s", cluster.Name)). + AddLabelsInMap(wellKnownLabels). 
+ GetObject() } -func BuildClusterRoleBinding(cluster *appsv1alpha1.Cluster) (*rbacv1.ClusterRoleBinding, error) { - return buildRBACObject[rbacv1.ClusterRoleBinding](cluster, "clusterrolebinding") +func BuildRoleBinding(cluster *appsv1alpha1.Cluster) *rbacv1.RoleBinding { + wellKnownLabels := buildWellKnownLabels(cluster.Spec.ClusterDefRef, cluster.Name, "") + delete(wellKnownLabels, constant.KBAppComponentLabelKey) + return builder.NewRoleBindingBuilder(cluster.Namespace, fmt.Sprintf("kb-%s", cluster.Name)). + AddLabelsInMap(wellKnownLabels). + SetRoleRef(rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: constant.RBACRoleName, + }). + AddSubjects(rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: cluster.Namespace, + Name: fmt.Sprintf("kb-%s", cluster.Name), + }). + GetObject() } -func buildRBACObject[Tp corev1.ServiceAccount | rbacv1.RoleBinding | rbacv1.ClusterRoleBinding]( - cluster *appsv1alpha1.Cluster, key string) (*Tp, error) { - const tplFile = "rbac_template.cue" - var obj Tp - pObj := &obj - if err := buildFromCUE(tplFile, map[string]any{"cluster": cluster}, key, pObj); err != nil { - return nil, err - } - return pObj, nil +func BuildClusterRoleBinding(cluster *appsv1alpha1.Cluster) *rbacv1.ClusterRoleBinding { + wellKnownLabels := buildWellKnownLabels(cluster.Spec.ClusterDefRef, cluster.Name, "") + delete(wellKnownLabels, constant.KBAppComponentLabelKey) + return builder.NewClusterRoleBindingBuilder(cluster.Namespace, fmt.Sprintf("kb-%s", cluster.Name)). + AddLabelsInMap(wellKnownLabels). + SetRoleRef(rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: constant.RBACClusterRoleName, + }). + AddSubjects(rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: cluster.Namespace, + Name: fmt.Sprintf("kb-%s", cluster.Name), + }). 
+ GetObject() } diff --git a/internal/controller/factory/builder_test.go b/internal/controller/factory/builder_test.go index 8f3807ebd0e..1521f0931d7 100644 --- a/internal/controller/factory/builder_test.go +++ b/internal/controller/factory/builder_test.go @@ -22,18 +22,15 @@ package factory import ( "encoding/json" "fmt" - "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/leaanthony/debme" "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -47,20 +44,6 @@ import ( viper "github.com/apecloud/kubeblocks/internal/viperx" ) -var tlog = ctrl.Log.WithName("builder_testing") - -func TestReadCUETplFromEmbeddedFS(t *testing.T) { - cueFS, err := debme.FS(cueTemplates, "cue") - if err != nil { - t.Error("Expected no error", err) - } - cueTpl, err := intctrlutil.NewCUETplFromBytes(cueFS.ReadFile("conn_credential_template.cue")) - if err != nil { - t.Error("Expected no error", err) - } - tlog.Info("", "cueValue", cueTpl) -} - var _ = Describe("builder", func() { const clusterDefName = "test-clusterdef" const clusterVersionName = "test-clusterversion" @@ -191,8 +174,7 @@ var _ = Describe("builder", func() { Namespace: "default", Name: "data-mysql-01-replicasets-0", } - pvc, err := BuildPVC(cluster, synthesizedComponent, &synthesizedComponent.VolumeClaimTemplates[0], pvcKey, snapshotName) - Expect(err).Should(BeNil()) + pvc := BuildPVC(cluster, synthesizedComponent, &synthesizedComponent.VolumeClaimTemplates[0], pvcKey, snapshotName) Expect(pvc).ShouldNot(BeNil()) Expect(pvc.Spec.AccessModes).Should(Equal(sts.Spec.VolumeClaimTemplates[0].Spec.AccessModes)) Expect(pvc.Spec.Resources).Should(Equal(synthesizedComponent.VolumeClaimTemplates[0].Spec.Resources)) @@ -212,15 +194,13 @@ var _ = 
Describe("builder", func() { clusterDefObj = testapps.NewClusterDefFactoryWithConnCredential("conn-cred").GetObject() clusterDef, cluster, synthesizedComponent = newClusterObjs(clusterDefObj) ) - credential, err := BuildConnCredential(clusterDef, cluster, synthesizedComponent) - Expect(err).Should(BeNil()) + credential := BuildConnCredential(clusterDef, cluster, synthesizedComponent) Expect(credential).ShouldNot(BeNil()) Expect(credential.Labels["apps.kubeblocks.io/cluster-type"]).Should(BeEmpty()) By("setting type") characterType := "test-character-type" clusterDef.Spec.Type = characterType - credential, err = BuildConnCredential(clusterDef, cluster, synthesizedComponent) - Expect(err).Should(BeNil()) + credential = BuildConnCredential(clusterDef, cluster, synthesizedComponent) Expect(credential).ShouldNot(BeNil()) Expect(credential.Labels["apps.kubeblocks.io/cluster-type"]).Should(Equal(characterType)) // "username": "root", @@ -412,8 +392,7 @@ var _ = Describe("builder", func() { It("builds PDB correctly", func() { _, cluster, synthesizedComponent := newClusterObjs(nil) - pdb, err := BuildPDB(cluster, synthesizedComponent) - Expect(err).Should(BeNil()) + pdb := BuildPDB(cluster, synthesizedComponent) Expect(pdb).ShouldNot(BeNil()) }) @@ -424,8 +403,7 @@ var _ = Describe("builder", func() { Name: "test-backup-job", } backupPolicyName := "test-backup-policy" - backupJob, err := BuildBackup(cluster, synthesizedComponent, backupPolicyName, backupJobKey, "snapshot") - Expect(err).Should(BeNil()) + backupJob := BuildBackup(cluster, synthesizedComponent, backupPolicyName, backupJobKey, "snapshot") Expect(backupJob).ShouldNot(BeNil()) }) @@ -439,15 +417,14 @@ var _ = Describe("builder", func() { }, ConfigConstraintRef: "test-config-constraint", } - configmap, err := BuildConfigMapWithTemplate(cluster, synthesizedComponent, config, - "test-cm", tplCfg.ConfigConstraintRef, tplCfg.ComponentTemplateSpec) - Expect(err).Should(BeNil()) + configmap := 
BuildConfigMapWithTemplate(cluster, synthesizedComponent, config, + "test-cm", tplCfg.ComponentTemplateSpec) Expect(configmap).ShouldNot(BeNil()) }) It("builds config manager sidecar container correctly", func() { _, cluster, synthesizedComponent := newClusterObjs(nil) - cfg, err := BuildEnvConfig(cluster, synthesizedComponent) + cfg := BuildEnvConfig(cluster, synthesizedComponent) sidecarRenderedParam := &cfgcm.CfgManagerBuildParams{ ManagerName: "cfgmgr", SecreteName: "test-secret", @@ -460,7 +437,6 @@ var _ = Describe("builder", func() { Volumes: []corev1.VolumeMount{}, Cluster: cluster, } - Expect(err).Should(BeNil()) configmap, err := BuildCfgManagerContainer(sidecarRenderedParam, synthesizedComponent) Expect(err).Should(BeNil()) Expect(configmap).ShouldNot(BeNil()) @@ -498,8 +474,7 @@ var _ = Describe("builder", func() { }, } key := types.NamespacedName{Name: "backup", Namespace: "default"} - job, err := BuildBackupManifestsJob(key, backup, podSpec) - Expect(err).Should(BeNil()) + job := BuildBackupManifestsJob(key, backup, podSpec) Expect(job).ShouldNot(BeNil()) Expect(job.Name).Should(Equal(key.Name)) }) @@ -534,8 +509,7 @@ var _ = Describe("builder", func() { It("builds volume snapshot class correctly", func() { className := "vsc-test" driverName := "csi-driver-test" - obj, err := BuildVolumeSnapshotClass(className, driverName) - Expect(err).Should(BeNil()) + obj := BuildVolumeSnapshotClass(className, driverName) Expect(obj).ShouldNot(BeNil()) Expect(obj.Name).Should(Equal(className)) Expect(obj.Driver).Should(Equal(driverName)) @@ -544,8 +518,7 @@ var _ = Describe("builder", func() { It("builds headless svc correctly", func() { _, cluster, synthesizedComponent := newClusterObjs(nil) expectSvcName := fmt.Sprintf("%s-%s-headless", cluster.Name, synthesizedComponent.Name) - obj, err := BuildHeadlessSvc(cluster, synthesizedComponent) - Expect(err).Should(BeNil()) + obj := BuildHeadlessSvc(cluster, synthesizedComponent) Expect(obj).ShouldNot(BeNil()) 
Expect(obj.Name).Should(Equal(expectSvcName)) }) @@ -572,8 +545,7 @@ var _ = Describe("builder", func() { It("builds serviceaccount correctly", func() { _, cluster, _ := newClusterObjs(nil) expectName := fmt.Sprintf("kb-%s", cluster.Name) - sa, err := BuildServiceAccount(cluster) - Expect(err).Should(BeNil()) + sa := BuildServiceAccount(cluster) Expect(sa).ShouldNot(BeNil()) Expect(sa.Name).Should(Equal(expectName)) }) @@ -581,8 +553,7 @@ var _ = Describe("builder", func() { It("builds rolebinding correctly", func() { _, cluster, _ := newClusterObjs(nil) expectName := fmt.Sprintf("kb-%s", cluster.Name) - rb, err := BuildRoleBinding(cluster) - Expect(err).Should(BeNil()) + rb := BuildRoleBinding(cluster) Expect(rb).ShouldNot(BeNil()) Expect(rb.Name).Should(Equal(expectName)) }) @@ -590,8 +561,7 @@ var _ = Describe("builder", func() { It("builds clusterrolebinding correctly", func() { _, cluster, _ := newClusterObjs(nil) expectName := fmt.Sprintf("kb-%s", cluster.Name) - crb, err := BuildClusterRoleBinding(cluster) - Expect(err).Should(BeNil()) + crb := BuildClusterRoleBinding(cluster) Expect(crb).ShouldNot(BeNil()) Expect(crb.Name).Should(Equal(expectName)) }) diff --git a/internal/controller/factory/cue/backup_job_template.cue b/internal/controller/factory/cue/backup_job_template.cue deleted file mode 100644 index 6fa6777d33f..00000000000 --- a/internal/controller/factory/cue/backup_job_template.cue +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - name: string - } -} -component: { - clusterDefName: string - name: string -} -backupPolicyName: string -backupJobKey: { - Name: string - Namespace: string -} -backupType: string -backupJob: { - apiVersion: "dataprotection.kubeblocks.io/v1alpha1" - kind: "Backup" - metadata: { - name: backupJobKey.Name - namespace: backupJobKey.Namespace - labels: { - "dataprotection.kubeblocks.io/backup-type": backupType - "apps.kubeblocks.io/managed-by": "cluster" - "backuppolicies.dataprotection.kubeblocks.io/name": backupPolicyName - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - spec: { - "backupPolicyName": backupPolicyName - "backupType": backupType - } -} diff --git a/internal/controller/factory/cue/backup_manifests_template.cue b/internal/controller/factory/cue/backup_manifests_template.cue deleted file mode 100644 index cd41196c069..00000000000 --- a/internal/controller/factory/cue/backup_manifests_template.cue +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -backup: { - metadata: { - name: string - namespace: string - } -} - -podSpec: { - containers: [...] - volumes: [...] - env: [...] - restartPolicy: "Never" - securityContext: - runAsUser: 0 -} - -job: { - apiVersion: "batch/v1" - kind: "Job" - metadata: { - name: string - namespace: string - labels: - "app.kubernetes.io/managed-by": "kubeblocks" - } - spec: { - template: { - spec: podSpec - } - backOffLimit: 3 - ttlSecondsAfterFinished: 10 - } -} diff --git a/internal/controller/factory/cue/config_manager_sidecar.cue b/internal/controller/factory/cue/config_manager_sidecar.cue deleted file mode 100644 index 9eeea7dd637..00000000000 --- a/internal/controller/factory/cue/config_manager_sidecar.cue +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -template: { - name: parameter.name - command: [ - "env", - ] - args: [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$(TOOLS_PATH)", - "/bin/reloader", - for arg in parameter.args { - arg - }, - ] - env: [ - { - name: "CONFIG_MANAGER_POD_IP" - valueFrom: { - fieldRef: { - apiVersion: "v1" - fieldPath: "status.podIP" - } - } - }, - if parameter.characterType != "" { - { - name: "DB_TYPE" - value: parameter.characterType - } - }, - if parameter.characterType == "mysql" { - { - name: "MYSQL_USER" - valueFrom: { - secretKeyRef: { - key: "username" - name: parameter.secreteName - } - } - } - }, - if parameter.characterType == "mysql" { - { - name: "MYSQL_PASSWORD" - valueFrom: { - secretKeyRef: { - key: "password" - name: parameter.secreteName - } - } - } - }, - if parameter.characterType == "mysql" { - { - name: "DATA_SOURCE_NAME" - value: "$(MYSQL_USER):$(MYSQL_PASSWORD)@(localhost:3306)/" - } - }, - // other type - ] - - image: parameter.sidecarImage - imagePullPolicy: "IfNotPresent" - volumeMounts: parameter.volumes - if parameter.shareProcessNamespace { - { - securityContext: - runAsUser: 0 - defaultAllowPrivilegeEscalation: false - } - } -} - -#ArgType: string -#EnvType: { - name: string - value: string - - // valueFrom - ... -} - -parameter: { - name: string - characterType: string - sidecarImage: string - secreteName: string - shareProcessNamespace: bool - args: [...#ArgType] - // envs?: [...#EnvType] - volumes: [...] 
-} diff --git a/internal/controller/factory/cue/config_template.cue b/internal/controller/factory/cue/config_template.cue deleted file mode 100644 index f07dad738bf..00000000000 --- a/internal/controller/factory/cue/config_template.cue +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -meta: { - clusterDefinition: { - name: string - } - - cluster: { - namespace: string - name: string - } - - component: { - name: string - configName: string - templateName: string - configConstraintsName: string - configTemplateName: string - compDefName: string - } -} - -config: { - apiVersion: "v1" - kind: "ConfigMap" - metadata: { - name: meta.component.configName - namespace: meta.cluster.namespace - labels: { - "app.kubernetes.io/name": "\(meta.clusterDefinition.name)" - "app.kubernetes.io/instance": meta.cluster.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(meta.component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(meta.component.name)" - // configmap selector for ConfigureController - "config.kubeblocks.io/config-type": "instance" - // config template name - "config.kubeblocks.io/template-name": "\(meta.component.templateName)" - } - annotations: { - // enable configmap upgrade - "config.kubeblocks.io/disable-reconfigure": "false" - } - - data: { - } - } -} diff --git a/internal/controller/factory/cue/conn_credential_template.cue b/internal/controller/factory/cue/conn_credential_template.cue deleted file mode 100644 index 237012a497e..00000000000 --- a/internal/controller/factory/cue/conn_credential_template.cue +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. 
-// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -clusterdefinition: { - metadata: { - name: string - } - spec: { - type: string - connectionCredential: {...} - } -} -cluster: { - metadata: { - namespace: string - name: string - } -} -secret: { - apiVersion: "v1" - stringData: clusterdefinition.spec.connectionCredential - kind: "Secret" - metadata: { - name: "\(cluster.metadata.name)-conn-credential" - namespace: cluster.metadata.namespace - labels: { - "app.kubernetes.io/name": "\(clusterdefinition.metadata.name)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - if clusterdefinition.spec.type != _|_ { - "apps.kubeblocks.io/cluster-type": clusterdefinition.spec.type - } - } - } -} diff --git a/internal/controller/factory/cue/env_config_template.cue b/internal/controller/factory/cue/env_config_template.cue deleted file mode 100644 index f2babc484c3..00000000000 --- a/internal/controller/factory/cue/env_config_template.cue +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -cluster: { - metadata: { - namespace: string - name: string - } -} -component: { - name: string - clusterDefName: string - compDefName: string -} - -config: { - apiVersion: "v1" - kind: "ConfigMap" - metadata: { - // this naming pattern has been referenced elsewhere, complete code scan is - // required if this naming pattern is going be changed. - name: "\(cluster.metadata.name)-\(component.name)-env" - namespace: cluster.metadata.namespace - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - // configmap selector for env update - "apps.kubeblocks.io/config-type": "kubeblocks-env" - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - data: [string]: string -} diff --git a/internal/controller/factory/cue/headless_service_template.cue b/internal/controller/factory/cue/headless_service_template.cue deleted file mode 100644 index c378fce32eb..00000000000 --- a/internal/controller/factory/cue/headless_service_template.cue +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -cluster: { - metadata: { - namespace: string - name: string - } -} -component: { - clusterDefName: string - compDefName: string - name: string - monitor: { - enable: bool - builtIn: bool - scrapePort: int - scrapePath: string - } - podSpec: containers: [...] -} - -service: { - "apiVersion": "v1" - "kind": "Service" - "metadata": { - namespace: cluster.metadata.namespace - name: "\(cluster.metadata.name)-\(component.name)-headless" - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - annotations: { - if component.monitor.enable == false { - "monitor.kubeblocks.io/scrape": "false" - "monitor.kubeblocks.io/agamotto": "false" - } - if component.monitor.enable == true && component.monitor.builtIn == false { - "monitor.kubeblocks.io/scrape": "true" - "monitor.kubeblocks.io/path": component.monitor.scrapePath - "monitor.kubeblocks.io/port": "\(component.monitor.scrapePort)" - "monitor.kubeblocks.io/scheme": "http" - "monitor.kubeblocks.io/agamotto": "false" - } - if component.monitor.enable == true && component.monitor.builtIn == true { - "monitor.kubeblocks.io/scrape": "false" - "monitor.kubeblocks.io/agamotto": "true" - } - } - } - "spec": { - "type": "ClusterIP" - "clusterIP": "None" - "selector": { - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - ports: [ - for _, container in component.podSpec.containers if container.ports != _|_ - for _, v in container.ports { - name: v.name - protocol: v.protocol - port: v.containerPort - targetPort: v.name - }, - ] - } -} diff --git a/internal/controller/factory/cue/pdb_template.cue b/internal/controller/factory/cue/pdb_template.cue deleted file mode 100644 index 
1c89fa6f3f5..00000000000 --- a/internal/controller/factory/cue/pdb_template.cue +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - namespace: string - name: string - } -} -component: { - clusterDefName: string - compDefName: string - name: string - minAvailable: string | int -} - -pdb: { - "apiVersion": "policy/v1" - "kind": "PodDisruptionBudget" - "metadata": { - namespace: cluster.metadata.namespace - name: "\(cluster.metadata.name)-\(component.name)" - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - "spec": { - if component.minAvailable != _|_ { - minAvailable: component.minAvailable - } - selector: { - matchLabels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - } -} diff --git a/internal/controller/factory/cue/pvc_template.cue b/internal/controller/factory/cue/pvc_template.cue deleted 
file mode 100644 index 1eb451f3c71..00000000000 --- a/internal/controller/factory/cue/pvc_template.cue +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - name: string - } -} -component: { - clusterDefName: string - name: string -} -volumeClaimTemplate: { - metadata: { - name: string - } - spec: { - accessModes: [string] - resources: {} - } -} -snapshot_name: string -pvc_key: { - Name: string - Namespace: string -} -pvc: { - kind: "PersistentVolumeClaim" - apiVersion: "v1" - metadata: { - name: pvc_key.Name - namespace: pvc_key.Namespace - labels: { - "apps.kubeblocks.io/vct-name": volumeClaimTemplate.metadata.name - if component.clusterDefName != _|_ { - "app.kubernetes.io/name": "\(component.clusterDefName)" - } - if component.name != _|_ { - "apps.kubeblocks.io/component-name": "\(component.name)" - } - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - } - } - spec: { - accessModes: volumeClaimTemplate.spec.accessModes - resources: volumeClaimTemplate.spec.resources - if volumeClaimTemplate.spec.storageClassName != _|_ { - storageClassName: volumeClaimTemplate.spec.storageClassName - } - if len(snapshot_name) > 0 { - dataSource: { - "name": snapshot_name - "kind": 
"VolumeSnapshot" - "apiGroup": "snapshot.storage.k8s.io" - } - } - } -} diff --git a/internal/controller/factory/cue/rbac_template.cue b/internal/controller/factory/cue/rbac_template.cue deleted file mode 100644 index d3c75799d7f..00000000000 --- a/internal/controller/factory/cue/rbac_template.cue +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -cluster: { - metadata: { - namespace: string - name: string - } - spec: { - clusterDefinitionRef: string - } -} - -serviceaccount: { - apiVersion: "v1" - kind: "ServiceAccount" - metadata: { - namespace: cluster.metadata.namespace - name: "kb-\(cluster.metadata.name)" - labels: { - "app.kubernetes.io/name": cluster.spec.clusterDefinitionRef - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - } - } -} - -rolebinding: { - apiVersion: "rbac.authorization.k8s.io/v1" - kind: "RoleBinding" - metadata: { - name: "kb-\(cluster.metadata.name)" - namespace: cluster.metadata.namespace - labels: { - "app.kubernetes.io/name": cluster.spec.clusterDefinitionRef - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - } - } - roleRef: { - apiGroup: "rbac.authorization.k8s.io" - kind: "ClusterRole" - name: "kubeblocks-cluster-pod-role" - } - subjects: [{ - kind: "ServiceAccount" - name: "kb-\(cluster.metadata.name)" - namespace: cluster.metadata.namespace - }] -} - -clusterrolebinding: { - apiVersion: "rbac.authorization.k8s.io/v1" - kind: "ClusterRoleBinding" - metadata: { - name: "kb-\(cluster.metadata.name)" - labels: { - "app.kubernetes.io/name": cluster.spec.clusterDefinitionRef - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - } - } - roleRef: { - apiGroup: "rbac.authorization.k8s.io" - kind: "ClusterRole" - name: "kubeblocks-volume-protection-pod-role" - } - subjects: [{ - kind: "ServiceAccount" - name: "kb-\(cluster.metadata.name)" - namespace: cluster.metadata.namespace - }] -} diff --git a/internal/controller/factory/cue/restore_job_template.cue b/internal/controller/factory/cue/restore_job_template.cue deleted file mode 100644 index 50c65a5ce87..00000000000 --- a/internal/controller/factory/cue/restore_job_template.cue +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part 
of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -container: { - name: "restore" - image: string - imagePullPolicy: "IfNotPresent" - command: [...] - volumeMounts: [...] - env: [...] - resources: {} -} - -job: { - apiVersion: "batch/v1" - kind: "Job" - metadata: { - name: string - namespace: string - labels: { - "app.kubernetes.io/managed-by": "kubeblocks" - } - } - spec: { - template: { - spec: { - containers: [container] - volumes: [...] - restartPolicy: "OnFailure" - securityContext: - runAsUser: 0 - } - } - } -} diff --git a/internal/controller/factory/cue/service_template.cue b/internal/controller/factory/cue/service_template.cue deleted file mode 100644 index 1359763b1d6..00000000000 --- a/internal/controller/factory/cue/service_template.cue +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - namespace: string - name: string - } -} - -component: { - clusterDefName: string - compDefName: string - name: string -} - -service: { - metadata: { - name: string - annotations: {} - } - spec: { - ports: [...] - type: string - } -} - -svc: { - "apiVersion": "v1" - "kind": "Service" - "metadata": { - namespace: cluster.metadata.namespace - if service.metadata.name != _|_ { - name: "\(cluster.metadata.name)-\(component.name)-\(service.metadata.name)" - } - if service.metadata.name == _|_ { - name: "\(cluster.metadata.name)-\(component.name)" - } - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - annotations: service.metadata.annotations - } - "spec": { - "selector": { - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - ports: service.spec.ports - if service.spec.type != _|_ { - type: service.spec.type - } - if service.spec.type == "LoadBalancer" { - // Set externalTrafficPolicy to Local has two benefits: - // 1. preserve client IP - // 2. 
improve network performance by reducing one hop - externalTrafficPolicy: "Local" - } - } -} diff --git a/internal/controller/factory/cue/statefulset_template.cue b/internal/controller/factory/cue/statefulset_template.cue deleted file mode 100644 index fa825577aeb..00000000000 --- a/internal/controller/factory/cue/statefulset_template.cue +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - namespace: string - name: string - } - spec: { - clusterVersionRef: string - } -} -component: { - clusterDefName: string - compDefName: string - name: string - workloadType: string - replicas: int - podSpec: { - containers: [...] - enableServiceLinks: bool | *false - } - volumeClaimTemplates: [...] 
-} - -statefulset: { - apiVersion: "apps/v1" - kind: "StatefulSet" - metadata: { - namespace: cluster.metadata.namespace - name: "\(cluster.metadata.name)-\(component.name)" - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - spec: { - selector: - matchLabels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - serviceName: "\(cluster.metadata.name)-\(component.name)-headless" - replicas: component.replicas - template: { - metadata: { - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - if cluster.spec.clusterVersionRef != _|_ { - "app.kubernetes.io/version": "\(cluster.spec.clusterVersionRef)" - } - - "apps.kubeblocks.io/component-name": "\(component.name)" - "apps.kubeblocks.io/workload-type": "\(component.workloadType)" - } - } - spec: component.podSpec - } - volumeClaimTemplates: component.volumeClaimTemplates - } -} diff --git a/internal/controller/factory/cue/volumesnapshotclass.cue b/internal/controller/factory/cue/volumesnapshotclass.cue deleted file mode 100644 index 9759ae77581..00000000000 --- a/internal/controller/factory/cue/volumesnapshotclass.cue +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software 
Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -class: { - apiVersion: "snapshot.storage.k8s.io/v1" - kind: "VolumeSnapshotClass" - metadata: { - name: string - labels: { - "app.kubernetes.io/managed-by": "kubeblocks" - } - } - driver: string - deletionPolicy: "Delete" -} diff --git a/internal/controller/plan/prepare_test.go b/internal/controller/plan/prepare_test.go index a859c708835..21936d97059 100644 --- a/internal/controller/plan/prepare_test.go +++ b/internal/controller/plan/prepare_test.go @@ -63,10 +63,7 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster.UID = types.UID("test-uid") } workloadProcessor := func(customSetup func(*corev1.ConfigMap) (client.Object, error)) error { - envConfig, err := factory.BuildEnvConfig(cluster, component) - if err != nil { - return err - } + envConfig := factory.BuildEnvConfig(cluster, component) resources = append(resources, envConfig) workload, err := customSetup(envConfig) @@ -79,10 +76,7 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, resources = append(resources, workload) }() - svc, err := factory.BuildHeadlessSvc(cluster, component) - if err != nil { - return err - } + svc := factory.BuildHeadlessSvc(cluster, component) resources = append(resources, svc) var podSpec *corev1.PodSpec @@ -138,10 +132,7 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, // if no these handle, the cluster controller will occur an error during reconciling. 
// conditional build PodDisruptionBudget if component.MinAvailable != nil { - pdb, err := factory.BuildPDB(cluster, component) - if err != nil { - return nil, err - } + pdb := factory.BuildPDB(cluster, component) resources = append(resources, pdb) } else { panic("this shouldn't happen") diff --git a/internal/controller/plan/restore.go b/internal/controller/plan/restore.go index b85676235f1..6c1237ad6f7 100644 --- a/internal/controller/plan/restore.go +++ b/internal/controller/plan/restore.go @@ -518,10 +518,7 @@ func (p *RestoreManager) createDataPVCs(synthesizedComponent *component.Synthesi for i := int32(0); i < synthesizedComponent.Replicas; i++ { pvcName := fmt.Sprintf("%s-%s-%s-%d", vct.Name, p.Cluster.Name, synthesizedComponent.Name, i) pvcKey := types.NamespacedName{Namespace: p.Cluster.Namespace, Name: pvcName} - pvc, err := factory.BuildPVC(p.Cluster, synthesizedComponent, &vct, pvcKey, snapshotName) - if err != nil { - return err - } + pvc := factory.BuildPVC(p.Cluster, synthesizedComponent, &vct, pvcKey, snapshotName) // Prevents halt recovery from checking uncleaned resources if pvc.Annotations == nil { pvc.Annotations = map[string]string{} @@ -529,7 +526,7 @@ func (p *RestoreManager) createDataPVCs(synthesizedComponent *component.Synthesi pvc.Annotations[constant.LastAppliedClusterAnnotationKey] = fmt.Sprintf(`{"metadata":{"uid":"%s","name":"%s"}}`, p.Cluster.UID, p.Cluster.Name) - if err = p.Client.Create(p.Ctx, pvc); err != nil && !apierrors.IsAlreadyExists(err) { + if err := p.Client.Create(p.Ctx, pvc); err != nil && !apierrors.IsAlreadyExists(err) { return err } } From d1558d60ac0ff4ed34d5fa954a31c9ee4f1d5bae Mon Sep 17 00:00:00 2001 From: xuriwuyun Date: Wed, 20 Sep 2023 18:03:40 +0800 Subject: [PATCH 03/58] fix: hscale without lorry (#5200) --- controllers/apps/components/base_stateful.go | 6 ++++++ lorry/client/client.go | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/controllers/apps/components/base_stateful.go 
b/controllers/apps/components/base_stateful.go index 6e4678ef32e..3d1806a8a70 100644 --- a/controllers/apps/components/base_stateful.go +++ b/controllers/apps/components/base_stateful.go @@ -765,6 +765,12 @@ func (c *rsmComponentBase) leaveMember4ScaleIn(reqCtx intctrlutil.RequestCtx, cl } continue } + + if lorryCli == nil { + // no lorry in the pod + continue + } + if err2 := lorryCli.LeaveMember(reqCtx.Ctx); err2 != nil { if err == nil { err = err2 diff --git a/lorry/client/client.go b/lorry/client/client.go index 5712fef0bc9..07aa4de0d41 100644 --- a/lorry/client/client.go +++ b/lorry/client/client.go @@ -102,7 +102,8 @@ func NewClientWithPod(pod *corev1.Pod, characterType string) (*OperationClient, port, err := intctrlutil.GetProbeHTTPPort(pod) if err != nil { - return nil, err + // not lorry in the pod, just return nil without error + return nil, nil } // don't use default http-client From c9f9e2937843adab4b925e78113f6e4943dcb204 Mon Sep 17 00:00:00 2001 From: free6om Date: Wed, 20 Sep 2023 18:07:50 +0800 Subject: [PATCH 04/58] chore: role probe 2.0 (#5178) role probe 2.0(support global role snapshot) --- apis/apps/v1alpha1/clusterdefinition_types.go | 37 +++ apis/apps/v1alpha1/zz_generated.deepcopy.go | 41 ++++ .../v1alpha1/replicatedstatemachine_types.go | 18 +- ...apps.kubeblocks.io_clusterdefinitions.yaml | 229 ++++++++++++++++++ ...kubeblocks.io_replicatedstatemachines.yaml | 11 +- config/webhook/manifests.yaml | 58 ++--- controllers/apps/cluster_controller_test.go | 44 +--- .../apps/components/consensus_set_utils.go | 2 +- .../apps/components/replication_set_utils.go | 2 +- controllers/k8score/event_controller_test.go | 32 ++- controllers/k8score/event_handler.go | 1 - .../k8score/role_change_event_handler.go | 87 +------ controllers/k8score/suite_test.go | 4 + ...apps.kubeblocks.io_clusterdefinitions.yaml | 229 ++++++++++++++++++ ...kubeblocks.io_replicatedstatemachines.yaml | 11 +- internal/common/doc.go | 24 ++ internal/common/types.go | 39 +++ 
internal/constant/const.go | 12 +- internal/controller/component/component.go | 12 +- .../controller/component/component_test.go | 2 +- internal/controller/component/probe_utils.go | 213 +++------------- .../controller/component/probe_utils_test.go | 23 +- internal/controller/component/type.go | 1 + internal/controller/factory/builder.go | 10 + internal/controller/plan/prepare_test.go | 2 +- .../controller/rsm/pod_role_event_handler.go | 116 ++++++--- .../rsm/pod_role_event_handler_test.go | 2 +- .../rsm/transformer_object_generation.go | 54 ++++- internal/controller/rsm/types.go | 45 ++-- internal/controller/rsm/utils.go | 34 ++- internal/controllerutil/pod_utils.go | 8 +- lorry/binding/base.go | 87 +------ lorry/binding/custom/custom.go | 83 +++---- lorry/binding/custom/custom_test.go | 15 +- lorry/binding/mysql/mysql.go | 2 - lorry/binding/types.go | 2 + lorry/binding/utils.go | 25 +- .../http/probe/checks_middleware.go | 21 +- lorry/middleware/http/probe/router.go | 15 +- lorry/util/types.go | 7 +- 40 files changed, 1034 insertions(+), 626 deletions(-) create mode 100644 internal/common/doc.go create mode 100644 internal/common/types.go diff --git a/apis/apps/v1alpha1/clusterdefinition_types.go b/apis/apps/v1alpha1/clusterdefinition_types.go index 141ed5cc8c8..6d2e9514cb6 100644 --- a/apis/apps/v1alpha1/clusterdefinition_types.go +++ b/apis/apps/v1alpha1/clusterdefinition_types.go @@ -23,6 +23,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) // ClusterDefinitionSpec defines the desired state of ClusterDefinition @@ -378,20 +380,30 @@ type ClusterComponentDefinition struct { // statelessSpec defines stateless related spec if workloadType is Stateless. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use RSMSpec instead." 
StatelessSpec *StatelessSetSpec `json:"statelessSpec,omitempty"` // statefulSpec defines stateful related spec if workloadType is Stateful. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use RSMSpec instead." StatefulSpec *StatefulSetSpec `json:"statefulSpec,omitempty"` // consensusSpec defines consensus related spec if workloadType is Consensus, required if workloadType is Consensus. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use RSMSpec instead." ConsensusSpec *ConsensusSetSpec `json:"consensusSpec,omitempty"` // replicationSpec defines replication related spec if workloadType is Replication. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use RSMSpec instead." ReplicationSpec *ReplicationSetSpec `json:"replicationSpec,omitempty"` + // RSMSpec defines workload related spec of this component. + // start from KB 0.7.0, RSM(ReplicatedStateMachineSpec) will be the underlying CR which powers all kinds of workload in KB. + // RSM is an enhanced stateful workload extension dedicated for heavy-state workloads like databases. + // +optional + RSMSpec *RSMSpec `json:"rsmSpec,omitempty"` + // horizontalScalePolicy controls the behavior of horizontal scale. // +optional HorizontalScalePolicy *HorizontalScalePolicy `json:"horizontalScalePolicy,omitempty"` @@ -701,6 +713,7 @@ type ClusterDefinitionProbes struct { // Probe for DB role changed check. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use RSMSpec instead." RoleProbe *ClusterDefinitionProbe `json:"roleProbe,omitempty"` // roleProbeTimeoutAfterPodsReady(in seconds), when all pods of the component are ready, @@ -881,6 +894,30 @@ type ConsensusMember struct { Replicas *int32 `json:"replicas,omitempty"` } +type RSMSpec struct { + // Roles, a list of roles defined in the system. 
+ // +optional + Roles []workloads.ReplicaRole `json:"roles,omitempty"` + + // RoleProbe provides method to probe role. + // +optional + RoleProbe *workloads.RoleProbe `json:"roleProbe,omitempty"` + + // MembershipReconfiguration provides actions to do membership dynamic reconfiguration. + // +optional + MembershipReconfiguration *workloads.MembershipReconfiguration `json:"membershipReconfiguration,omitempty"` + + // MemberUpdateStrategy, Members(Pods) update strategy. + // serial: update Members one by one that guarantee minimum component unavailable time. + // Learner -> Follower(with AccessMode=none) -> Follower(with AccessMode=readonly) -> Follower(with AccessMode=readWrite) -> Leader + // bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. + // Learner, Follower(minority) in parallel -> Follower(majority) -> Leader, keep majority online all the time. + // parallel: force parallel + // +kubebuilder:validation:Enum={Serial,BestEffortParallel,Parallel} + // +optional + MemberUpdateStrategy *workloads.MemberUpdateStrategy `json:"memberUpdateStrategy,omitempty"` +} + type ReplicationSetSpec struct { StatefulSetSpec `json:",inline"` } diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 4a5aa7365f1..e3a08ddd5ae 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,7 @@ along with this program. If not, see . 
package v1alpha1 import ( + workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -460,6 +461,11 @@ func (in *ClusterComponentDefinition) DeepCopyInto(out *ClusterComponentDefiniti *out = new(ReplicationSetSpec) (*in).DeepCopyInto(*out) } + if in.RSMSpec != nil { + in, out := &in.RSMSpec, &out.RSMSpec + *out = new(RSMSpec) + (*in).DeepCopyInto(*out) + } if in.HorizontalScalePolicy != nil { in, out := &in.HorizontalScalePolicy, &out.HorizontalScalePolicy *out = new(HorizontalScalePolicy) @@ -3109,6 +3115,41 @@ func (in *ProvisionStatements) DeepCopy() *ProvisionStatements { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RSMSpec) DeepCopyInto(out *RSMSpec) { + *out = *in + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]workloadsv1alpha1.ReplicaRole, len(*in)) + copy(*out, *in) + } + if in.RoleProbe != nil { + in, out := &in.RoleProbe, &out.RoleProbe + *out = new(workloadsv1alpha1.RoleProbe) + (*in).DeepCopyInto(*out) + } + if in.MembershipReconfiguration != nil { + in, out := &in.MembershipReconfiguration, &out.MembershipReconfiguration + *out = new(workloadsv1alpha1.MembershipReconfiguration) + (*in).DeepCopyInto(*out) + } + if in.MemberUpdateStrategy != nil { + in, out := &in.MemberUpdateStrategy, &out.MemberUpdateStrategy + *out = new(workloadsv1alpha1.MemberUpdateStrategy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSMSpec. +func (in *RSMSpec) DeepCopy() *RSMSpec { + if in == nil { + return nil + } + out := new(RSMSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Reconfigure) DeepCopyInto(out *Reconfigure) { *out = *in diff --git a/apis/workloads/v1alpha1/replicatedstatemachine_types.go b/apis/workloads/v1alpha1/replicatedstatemachine_types.go index 013aed80aec..9f1fdd2146b 100644 --- a/apis/workloads/v1alpha1/replicatedstatemachine_types.go +++ b/apis/workloads/v1alpha1/replicatedstatemachine_types.go @@ -149,7 +149,7 @@ type ReplicatedStateMachineStatus struct { // +kubebuilder:printcolumn:name="REPLICAS",type="string",JSONPath=".status.replicas",description="total replicas." // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// ReplicatedStateMachine is the Schema for the replicatedstatemachines API +// ReplicatedStateMachine is the Schema for the replicatedstatemachines API. type ReplicatedStateMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -210,6 +210,16 @@ const ( ParallelUpdateStrategy MemberUpdateStrategy = "Parallel" ) +// RoleUpdateMechanism defines the way how pod role label being updated. +// +enum +type RoleUpdateMechanism string + +const ( + ReadinessProbeEventUpdate RoleUpdateMechanism = "ReadinessProbeEventUpdate" + DirectAPIServerEventUpdate RoleUpdateMechanism = "DirectAPIServerEventUpdate" + NoneUpdate RoleUpdateMechanism = "None" +) + // RoleProbe defines how to observe role type RoleProbe struct { // ProbeActions define Actions to be taken in serial. @@ -255,6 +265,12 @@ type RoleProbe struct { // +kubebuilder:validation:Minimum=1 // +optional FailureThreshold int32 `json:"failureThreshold,omitempty"` + + // RoleUpdateMechanism specifies the way how pod role label being updated. 
+ // +kubebuilder:default=None + // +kubebuilder:validation:Enum={ReadinessProbeEventUpdate, DirectAPIServerEventUpdate, None} + // +optional + RoleUpdateMechanism RoleUpdateMechanism `json:"roleUpdateMechanism,omitempty"` } type Credential struct { diff --git a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml index a86c3b22a9e..abe37f1b831 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml @@ -8318,6 +8318,235 @@ spec: - Parallel type: string type: object + rsmSpec: + description: RSMSpec defines workload related spec of this component. + start from KB 0.7.0, RSM(ReplicatedStateMachineSpec) will + be the underlying CR which powers all kinds of workload in + KB. RSM is an enhanced stateful workload extension dedicated + for heavy-state workloads like databases. + properties: + memberUpdateStrategy: + description: 'MemberUpdateStrategy, Members(Pods) update + strategy. serial: update Members one by one that guarantee + minimum component unavailable time. Learner -> Follower(with + AccessMode=none) -> Follower(with AccessMode=readonly) + -> Follower(with AccessMode=readWrite) -> Leader bestEffortParallel: + update Members in parallel that guarantee minimum component + un-writable time. Learner, Follower(minority) in parallel + -> Follower(majority) -> Leader, keep majority online + all the time. parallel: force parallel' + enum: + - Serial + - BestEffortParallel + - Parallel + type: string + membershipReconfiguration: + description: MembershipReconfiguration provides actions + to do membership dynamic reconfiguration. 
+ properties: + logSyncAction: + description: LogSyncAction specifies how to trigger + the new member to start log syncing previous none-nil + action's Image wil be used if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + memberJoinAction: + description: MemberJoinAction specifies how to add member + previous none-nil action's Image wil be used if not + configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + memberLeaveAction: + description: MemberLeaveAction specifies how to remove + member previous none-nil action's Image wil be used + if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + promoteAction: + description: PromoteAction specifies how to tell the + cluster that the new member can join voting now previous + none-nil action's Image wil be used if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + switchoverAction: + description: SwitchoverAction specifies how to do 
switchover + latest [BusyBox](https://busybox.net/) image will + be used if Image not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + type: object + roleProbe: + description: RoleProbe provides method to probe role. + properties: + failureThreshold: + default: 3 + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + default: 0 + description: Number of seconds after the container has + started before role probe has started. + format: int32 + minimum: 0 + type: integer + periodSeconds: + default: 2 + description: How often (in seconds) to perform the probe. + Default to 2 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + probeActions: + description: 'ProbeActions define Actions to be taken + in serial. 
after all actions done, the final output + should be a single string of the role name defined + in spec.Roles latest [BusyBox](https://busybox.net/) + image will be used if Image not configured Environment + variables can be used in Command: - v_KB_RSM_LAST_STDOUT + stdout from last action, watch ''v_'' prefixed - KB_RSM_USERNAME + username part of credential - KB_RSM_PASSWORD password + part of credential' + items: + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + type: array + roleUpdateMechanism: + default: None + description: RoleUpdateMechanism specifies the way how + pod role label being updated. + enum: + - ReadinessProbeEventUpdate + - DirectAPIServerEventUpdate + - None + type: string + successThreshold: + default: 1 + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + default: 1 + description: Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. + format: int32 + minimum: 1 + type: integer + required: + - probeActions + type: object + roles: + description: Roles, a list of roles defined in the system. + items: + properties: + accessMode: + default: ReadWrite + description: AccessMode, what service this member + capable. + enum: + - None + - Readonly + - ReadWrite + type: string + canVote: + default: true + description: CanVote, whether this member has voting + rights + type: boolean + isLeader: + default: false + description: IsLeader, whether this member is the + leader + type: boolean + name: + default: leader + description: Name, role name. 
+ type: string + required: + - accessMode + - name + type: object + type: array + type: object scriptSpecs: description: The scriptSpec field provided by provider, and finally this configTemplateRefs will be rendered into the diff --git a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml index 2836ac341b7..94fbb5bca94 100644 --- a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -41,7 +41,7 @@ spec: schema: openAPIV3Schema: description: ReplicatedStateMachine is the Schema for the replicatedstatemachines - API + API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -970,6 +970,15 @@ spec: - command type: object type: array + roleUpdateMechanism: + default: None + description: RoleUpdateMechanism specifies the way how pod role + label being updated. 
+ enum: + - ReadinessProbeEventUpdate + - DirectAPIServerEventUpdate + - None + type: string successThreshold: default: 1 description: Minimum consecutive successes for the probe to be diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 192f52d5806..5c03360ba33 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -5,6 +5,26 @@ metadata: creationTimestamp: null name: mutating-webhook-configuration webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + failurePolicy: Fail + name: mreplicatedstatemachine.kb.io + rules: + - apiGroups: + - workloads.kubeblocks.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - replicatedstatemachines + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -45,15 +65,22 @@ webhooks: resources: - servicedescriptors sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: - admissionReviewVersions: - v1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + path: /validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine failurePolicy: Fail - name: mreplicatedstatemachine.kb.io + name: vreplicatedstatemachine.kb.io rules: - apiGroups: - workloads.kubeblocks.io @@ -65,13 +92,6 @@ webhooks: resources: - replicatedstatemachines sideEffects: None ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: validating-webhook-configuration -webhooks: - admissionReviewVersions: - v1 clientConfig: @@ -172,23 +192,3 @@ webhooks: resources: - servicedescriptors sideEffects: None -- admissionReviewVersions: - - v1 - 
clientConfig: - service: - name: webhook-service - namespace: system - path: /validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine - failurePolicy: Fail - name: vreplicatedstatemachine.kb.io - rules: - - apiGroups: - - workloads.kubeblocks.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - replicatedstatemachines - sideEffects: None diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 3c8fc176107..7da5453691d 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -61,7 +61,6 @@ import ( testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" viper "github.com/apecloud/kubeblocks/internal/viperx" lorry "github.com/apecloud/kubeblocks/lorry/client" - lorryutil "github.com/apecloud/kubeblocks/lorry/util" ) const ( @@ -1541,32 +1540,6 @@ var _ = Describe("Cluster Controller", func() { }) } - mockRoleChangedEvent := func(key types.NamespacedName, sts *appsv1.StatefulSet) []corev1.Event { - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) - Expect(err).To(Succeed()) - - events := make([]corev1.Event, 0) - for _, pod := range pods { - event := corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name + "-event", - Namespace: testCtx.DefaultNamespace, - }, - Reason: string(lorryutil.CheckRoleOperation), - Message: `{"event":"Success","originalRole":"Leader","role":"Follower"}`, - InvolvedObject: corev1.ObjectReference{ - Name: pod.Name, - Namespace: testCtx.DefaultNamespace, - UID: pod.UID, - FieldPath: constant.ProbeCheckRolePath, - }, - } - events = append(events, event) - } - events[0].Message = `{"event":"Success","originalRole":"Leader","role":"Leader"}` - return events - } - getStsPodsName := func(sts *appsv1.StatefulSet) []string { pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) Expect(err).To(Succeed()) @@ -1615,7 +1588,7 @@ var _ = Describe("Cluster 
Controller", func() { By("Creating mock pods in StatefulSet, and set controller reference") pods := mockPodsForTest(clusterObj, replicas) - for _, pod := range pods { + for i, pod := range pods { Expect(controllerutil.SetControllerReference(sts, &pod, scheme.Scheme)).Should(Succeed()) Expect(testCtx.CreateObj(testCtx.Ctx, &pod)).Should(Succeed()) patch := client.MergeFrom(pod.DeepCopy()) @@ -1624,15 +1597,14 @@ var _ = Describe("Cluster Controller", func() { Type: corev1.PodReady, Status: corev1.ConditionTrue, }} - // ERROR: the object has been modified; please apply your changes to the latest version and try again Eventually(k8sClient.Status().Patch(ctx, &pod, patch)).Should(Succeed()) - } - - By("Creating mock role changed events") - // pod.Labels[intctrlutil.RoleLabelKey] will be filled with the role - events := mockRoleChangedEvent(clusterKey, sts) - for _, event := range events { - Expect(testCtx.CreateObj(ctx, &event)).Should(Succeed()) + role := "follower" + if i == 0 { + role = "leader" + } + patch = client.MergeFrom(pod.DeepCopy()) + pod.Labels[constant.RoleLabelKey] = role + Eventually(k8sClient.Patch(ctx, &pod, patch)).Should(Succeed()) } By("Checking pods' role are changed accordingly") diff --git a/controllers/apps/components/consensus_set_utils.go b/controllers/apps/components/consensus_set_utils.go index ad15b2e8560..e7cf775d2ab 100644 --- a/controllers/apps/components/consensus_set_utils.go +++ b/controllers/apps/components/consensus_set_utils.go @@ -194,7 +194,7 @@ func updateConsensusSetRoleLabel(cli client.Client, if pod.Annotations == nil { pod.Annotations = map[string]string{} } - pod.Annotations[constant.LastRoleChangedEventTimestampAnnotationKey] = event.EventTime.Time.Format(time.RFC3339Nano) + pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] = event.EventTime.Time.Format(time.RFC3339Nano) return cli.Patch(ctx, pod, patch) } diff --git a/controllers/apps/components/replication_set_utils.go 
b/controllers/apps/components/replication_set_utils.go index f985a677aad..ed117121417 100644 --- a/controllers/apps/components/replication_set_utils.go +++ b/controllers/apps/components/replication_set_utils.go @@ -96,7 +96,7 @@ func updateObjRoleChangedInfo[T generics.Object, PT generics.PObject[T]]( if pObj.GetAnnotations() == nil { pObj.SetAnnotations(map[string]string{}) } - pObj.GetAnnotations()[constant.LastRoleChangedEventTimestampAnnotationKey] = event.EventTime.Time.Format(time.RFC3339Nano) + pObj.GetAnnotations()[constant.LastRoleSnapshotVersionAnnotationKey] = event.EventTime.Time.Format(time.RFC3339Nano) if err := cli.Patch(ctx, pObj, patch); err != nil { return err } diff --git a/controllers/k8score/event_controller_test.go b/controllers/k8score/event_controller_test.go index 53215fe656f..f461d0714e2 100644 --- a/controllers/k8score/event_controller_test.go +++ b/controllers/k8score/event_controller_test.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/builder" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" @@ -123,9 +124,30 @@ var _ = Describe("Event Controller", func() { Create(&testCtx).GetObject() Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(clusterObj), &appsv1alpha1.Cluster{}, true)).Should(Succeed()) + rsmName := fmt.Sprintf("%s-%s", clusterObj.Name, consensusCompName) + rsm := testapps.NewRSMFactory(clusterObj.Namespace, rsmName, clusterObj.Name, consensusCompName). + SetReplicas(int32(3)). + AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). 
+ Create(&testCtx).GetObject() + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(tmpRSM *workloads.ReplicatedStateMachine) { + tmpRSM.Spec.Roles = []workloads.ReplicaRole{ + { + Name: "leader", + IsLeader: true, + AccessMode: workloads.ReadWriteMode, + CanVote: true, + }, + { + Name: "follower", + IsLeader: false, + AccessMode: workloads.ReadonlyMode, + CanVote: true, + }, + } + })()).Should(Succeed()) By("create involved pod") var uid types.UID - podName := "foo" + podName := fmt.Sprintf("%s-%d", rsmName, 0) pod := createInvolvedPod(podName, clusterObj.Name, consensusCompName) Expect(testCtx.CreateObj(ctx, pod)).Should(Succeed()) Eventually(func() error { @@ -159,13 +181,13 @@ var _ = Describe("Event Controller", func() { g.Expect(p).ShouldNot(BeNil()) g.Expect(p.Labels).ShouldNot(BeNil()) g.Expect(p.Labels[constant.RoleLabelKey]).Should(Equal(role)) - g.Expect(p.Annotations[constant.LastRoleChangedEventTimestampAnnotationKey]).Should(Equal(sndEvent.EventTime.Time.Format(time.RFC3339Nano))) + g.Expect(p.Annotations[constant.LastRoleSnapshotVersionAnnotationKey]).Should(Equal(sndEvent.EventTime.Time.Format(time.RFC3339Nano))) })).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(sndEvent), func(g Gomega, e *corev1.Event) { g.Expect(e).ShouldNot(BeNil()) g.Expect(e.Annotations).ShouldNot(BeNil()) - g.Expect(e.Annotations[roleChangedAnnotKey]).Should(Equal(trueStr)) + g.Expect(e.Annotations[roleChangedAnnotKey]).Should(Equal("count-0")) })).Should(Succeed()) By("check whether the duration and number of events reach the threshold") @@ -190,7 +212,7 @@ var _ = Describe("Event Controller", func() { g.Expect(p).ShouldNot(BeNil()) g.Expect(p.Labels).ShouldNot(BeNil()) g.Expect(p.Labels[constant.RoleLabelKey]).ShouldNot(Equal(role)) - g.Expect(p.Annotations[constant.LastRoleChangedEventTimestampAnnotationKey]).ShouldNot(Equal(sndInvalidEvent.EventTime.Time.Format(time.RFC3339Nano))) + 
g.Expect(p.Annotations[constant.LastRoleSnapshotVersionAnnotationKey]).ShouldNot(Equal(sndInvalidEvent.EventTime.Time.Format(time.RFC3339Nano))) })).Should(Succeed()) By("send role changed event with afterLastTS later than pod last role changes event timestamp annotation should be update successfully") @@ -212,7 +234,7 @@ var _ = Describe("Event Controller", func() { g.Expect(p).ShouldNot(BeNil()) g.Expect(p.Labels).ShouldNot(BeNil()) g.Expect(p.Labels[constant.RoleLabelKey]).Should(Equal(role)) - g.Expect(p.Annotations[constant.LastRoleChangedEventTimestampAnnotationKey]).Should(Equal(sndValidEvent.EventTime.Time.Format(time.RFC3339Nano))) + g.Expect(p.Annotations[constant.LastRoleSnapshotVersionAnnotationKey]).Should(Equal(sndValidEvent.EventTime.Time.Format(time.RFC3339Nano))) })).Should(Succeed()) }) }) diff --git a/controllers/k8score/event_handler.go b/controllers/k8score/event_handler.go index 21ec2b1cbea..7e002904183 100644 --- a/controllers/k8score/event_handler.go +++ b/controllers/k8score/event_handler.go @@ -35,6 +35,5 @@ type EventHandler interface { var EventHandlerMap = map[string]EventHandler{} func init() { - EventHandlerMap["role-change-handler"] = &RoleChangeEventHandler{} EventHandlerMap["rsm-event-handler"] = &rsm.PodRoleEventHandler{} } diff --git a/controllers/k8score/role_change_event_handler.go b/controllers/k8score/role_change_event_handler.go index 6c4d2e63b43..dd7db82781a 100644 --- a/controllers/k8score/role_change_event_handler.go +++ b/controllers/k8score/role_change_event_handler.go @@ -102,7 +102,7 @@ func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, re // compare the EventTime of the current event object with the lastTimestamp of the last recorded in the pod annotation, // if the current event's EventTime is earlier than the recorded lastTimestamp in the pod annotation, // it indicates that the current event has arrived out of order and is expired, so it should not be processed. 
- lastTimestampStr, ok := pod.Annotations[constant.LastRoleChangedEventTimestampAnnotationKey] + lastTimestampStr, ok := pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] if ok { lastTimestamp, err := time.Parse(time.RFC3339Nano, lastTimestampStr) if err != nil { @@ -137,88 +137,3 @@ func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, re } return role, nil } - -// handleGlobalInfoEvent handles cluster role changed event and return err if occurs. -// func handleGlobalInfoEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, recorder record.EventRecorder, event *corev1.Event) error { -// // parse probe event message -// global := ParseProbeEventMessage(reqCtx, event) -// if global == nil { -// reqCtx.Log.Info("parse probe event message failed", "message", event.Message) -// return nil -// } -// -// // if probe event operation is not implemented, check role failed or invalid, ignore it -// if global.Event == ProbeEventOperationNotImpl || global.Event == ProbeEventCheckRoleFailed || global.Event == ProbeEventRoleInvalid { -// reqCtx.Log.Info("probe event failed") -// return nil -// } -// -// // check term -// if global.Term < term { -// reqCtx.Log.Info("out of date message", "message", event.Message) -// return nil -// } -// term = global.Term -// -// // get pod -// pod := &corev1.Pod{} -// pods := &corev1.PodList{} -// err := cli.List(reqCtx.Ctx, pods, client.InNamespace(event.InvolvedObject.Namespace)) -// if err != nil || len(pods.Items) == 0 { -// return err -// } -// for _, p := range pods.Items { -// if p.Name == event.InvolvedObject.Name { -// pod = &p -// break -// } -// } -// // event belongs to old pod with the same name, ignore it -// if pod.UID != event.InvolvedObject.UID { -// return nil -// } -// -// // get cluster obj of the pod -// cluster := &appsv1alpha1.Cluster{} -// if err := cli.Get(reqCtx.Ctx, types.NamespacedName{ -// Namespace: event.InvolvedObject.Namespace, -// Name: 
pod.Labels[constant.AppInstanceLabelKey], -// }, cluster); err != nil { -// return err -// } -// -// // get component name -// reqCtx.Log.V(1).Info("handle role changed event", "event uid", event.UID, "cluster", cluster.Name, "pod", pod.Name) -// compName, componentDef, err := components.GetComponentInfoByPod(reqCtx.Ctx, cli, *cluster, pod) -// if err != nil { -// return err -// } -// -// // pod involved is a follower, just update single pod -// if global.Message != "" { -// role := strings.ToLower(global.Message) -// return components.UpdateConsensusSetRoleLabel(cli, reqCtx, event, componentDef, pod, role) -// } -// -// switch componentDef.WorkloadType { -// case appsv1alpha1.Consensus: -// for _, pod := range pods.Items { -// if role, ok := global.PodName2Role[pod.Name]; ok { -// err := components.UpdateConsensusSetRoleLabel(cli, reqCtx, event, componentDef, &pod, role) -// if err != nil { -// return err -// } -// } -// } -// case appsv1alpha1.Replication: -// for _, pod := range pods.Items { -// if role, ok := global.PodName2Role[pod.Status.PodIP]; ok { -// err := components.HandleReplicationSetRoleChangeEvent(cli, reqCtx, event, cluster, compName, &pod, role) -// if err != nil { -// return err -// } -// } -// } -// } -// return nil -// } diff --git a/controllers/k8score/suite_test.go b/controllers/k8score/suite_test.go index 1557b7a067d..f81b2aea10a 100644 --- a/controllers/k8score/suite_test.go +++ b/controllers/k8score/suite_test.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/testutil" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -95,6 +96,9 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = workloads.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // 
+kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml index a86c3b22a9e..abe37f1b831 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml @@ -8318,6 +8318,235 @@ spec: - Parallel type: string type: object + rsmSpec: + description: RSMSpec defines workload related spec of this component. + start from KB 0.7.0, RSM(ReplicatedStateMachineSpec) will + be the underlying CR which powers all kinds of workload in + KB. RSM is an enhanced stateful workload extension dedicated + for heavy-state workloads like databases. + properties: + memberUpdateStrategy: + description: 'MemberUpdateStrategy, Members(Pods) update + strategy. serial: update Members one by one that guarantee + minimum component unavailable time. Learner -> Follower(with + AccessMode=none) -> Follower(with AccessMode=readonly) + -> Follower(with AccessMode=readWrite) -> Leader bestEffortParallel: + update Members in parallel that guarantee minimum component + un-writable time. Learner, Follower(minority) in parallel + -> Follower(majority) -> Leader, keep majority online + all the time. parallel: force parallel' + enum: + - Serial + - BestEffortParallel + - Parallel + type: string + membershipReconfiguration: + description: MembershipReconfiguration provides actions + to do membership dynamic reconfiguration. 
+ properties: + logSyncAction: + description: LogSyncAction specifies how to trigger + the new member to start log syncing previous none-nil + action's Image wil be used if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + memberJoinAction: + description: MemberJoinAction specifies how to add member + previous none-nil action's Image wil be used if not + configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + memberLeaveAction: + description: MemberLeaveAction specifies how to remove + member previous none-nil action's Image wil be used + if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + promoteAction: + description: PromoteAction specifies how to tell the + cluster that the new member can join voting now previous + none-nil action's Image wil be used if not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + switchoverAction: + description: SwitchoverAction specifies how to do 
switchover + latest [BusyBox](https://busybox.net/) image will + be used if Image not configured + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + type: object + roleProbe: + description: RoleProbe provides method to probe role. + properties: + failureThreshold: + default: 3 + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + default: 0 + description: Number of seconds after the container has + started before role probe has started. + format: int32 + minimum: 0 + type: integer + periodSeconds: + default: 2 + description: How often (in seconds) to perform the probe. + Default to 2 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + probeActions: + description: 'ProbeActions define Actions to be taken + in serial. 
after all actions done, the final output + should be a single string of the role name defined + in spec.Roles latest [BusyBox](https://busybox.net/) + image will be used if Image not configured Environment + variables can be used in Command: - v_KB_RSM_LAST_STDOUT + stdout from last action, watch ''v_'' prefixed - KB_RSM_USERNAME + username part of credential - KB_RSM_PASSWORD password + part of credential' + items: + properties: + command: + description: Command will be executed in Container + to retrieve or process role info + items: + type: string + type: array + image: + description: utility image contains command that + can be used to retrieve of process role info + type: string + required: + - command + type: object + type: array + roleUpdateMechanism: + default: None + description: RoleUpdateMechanism specifies the way how + pod role label being updated. + enum: + - ReadinessProbeEventUpdate + - DirectAPIServerEventUpdate + - None + type: string + successThreshold: + default: 1 + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + default: 1 + description: Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. + format: int32 + minimum: 1 + type: integer + required: + - probeActions + type: object + roles: + description: Roles, a list of roles defined in the system. + items: + properties: + accessMode: + default: ReadWrite + description: AccessMode, what service this member + capable. + enum: + - None + - Readonly + - ReadWrite + type: string + canVote: + default: true + description: CanVote, whether this member has voting + rights + type: boolean + isLeader: + default: false + description: IsLeader, whether this member is the + leader + type: boolean + name: + default: leader + description: Name, role name. 
+ type: string + required: + - accessMode + - name + type: object + type: array + type: object scriptSpecs: description: The scriptSpec field provided by provider, and finally this configTemplateRefs will be rendered into the diff --git a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml index 2836ac341b7..94fbb5bca94 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -41,7 +41,7 @@ spec: schema: openAPIV3Schema: description: ReplicatedStateMachine is the Schema for the replicatedstatemachines - API + API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -970,6 +970,15 @@ spec: - command type: object type: array + roleUpdateMechanism: + default: None + description: RoleUpdateMechanism specifies the way how pod role + label being updated. + enum: + - ReadinessProbeEventUpdate + - DirectAPIServerEventUpdate + - None + type: string successThreshold: default: 1 description: Minimum consecutive successes for the probe to be diff --git a/internal/common/doc.go b/internal/common/doc.go new file mode 100644 index 00000000000..878d6bf35ab --- /dev/null +++ b/internal/common/doc.go @@ -0,0 +1,24 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +/* +Package common provides types and utils shared by all KubeBlocks components: KubeBlocks Core, KBCLI, Lorry etc. +will promote to pkg/common when stable. +*/ +package common diff --git a/internal/common/types.go b/internal/common/types.go new file mode 100644 index 00000000000..f4279a1c9f4 --- /dev/null +++ b/internal/common/types.go @@ -0,0 +1,39 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package common + +// PodRoleNamePair defines a pod name and role name pair. +type PodRoleNamePair struct { + PodName string `json:"podName,omitempty"` + RoleName string `json:"roleName,omitempty"` +} + +// GlobalRoleSnapshot defines a global(leader) perspective of all pods role. +// KB provides two role probe methods: per-pod level role probe and retrieving all node roles from the leader node. +// The latter is referred to as the global role snapshot. This data structure is used to represent a snapshot of global role information. +// The snapshot contains two types of information: the mapping relationship between all node names and role names, +// and the version of the snapshot. 
The purpose of the snapshot version is to ensure that only role information +// that is more up-to-date than the current role information on the Pod Label will be updated. This resolves the issue of +// role information disorder in scenarios such as KB upgrades or exceptions causing restarts, +// network partitioning leading to split-brain situations, node crashes, and similar occurrences. +type GlobalRoleSnapshot struct { + Version string `json:"term,omitempty"` + PodRoleNamePairs []PodRoleNamePair `json:"PodRoleNamePairs,omitempty"` +} diff --git a/internal/constant/const.go b/internal/constant/const.go index d6fdb10c92e..f7b37d54bd9 100644 --- a/internal/constant/const.go +++ b/internal/constant/const.go @@ -151,7 +151,7 @@ const ( ConfigAppliedVersionAnnotationKey = "config.kubeblocks.io/config-applied-version" KubeBlocksGenerationKey = "kubeblocks.io/generation" ExtraEnvAnnotationKey = "kubeblocks.io/extra-env" - LastRoleChangedEventTimestampAnnotationKey = "apps.kubeblocks.io/last-role-changed-event-timestamp" + LastRoleSnapshotVersionAnnotationKey = "apps.kubeblocks.io/last-role-snapshot-version" // kubeblocks.io well-known finalizers DBClusterFinalizerName = "cluster.kubeblocks.io/finalizer" @@ -229,22 +229,14 @@ const ( ProbeHTTPPortName = "probe-http-port" ProbeGRPCPortName = "probe-grpc-port" ProbeInitContainerName = "kb-initprobe" - RoleProbeContainerName = "kb-checkrole" + LorryContainerName = "kb-lorry" StatusProbeContainerName = "kb-checkstatus" RunningProbeContainerName = "kb-checkrunning" VolumeProtectionProbeContainerName = "kb-volume-protection" // the filedpath name used in event.InvolvedObject.FieldPath - ProbeCheckRolePath = "spec.containers{" + RoleProbeContainerName + "}" ProbeCheckStatusPath = "spec.containers{" + StatusProbeContainerName + "}" ProbeCheckRunningPath = "spec.containers{" + RunningProbeContainerName + "}" - - ProbeAgentMountName = "shell2http-mount" - ProbeAgentMountPath = "/shell2http" - ProbeAgent = "shell2http" - 
ProbeAgentImage = "msoap/shell2http:1.16.0" - OriginBinaryPath = "/app/shell2http" - DefaultActionImage = "busybox:latest" ) const ( diff --git a/internal/controller/component/component.go b/internal/controller/component/component.go index 0fdadaea90b..f8a7dc77db0 100644 --- a/internal/controller/component/component.go +++ b/internal/controller/component/component.go @@ -201,6 +201,7 @@ func buildComponent(reqCtx intctrlutil.RequestCtx, StatefulSpec: clusterCompDefObj.StatefulSpec, ConsensusSpec: clusterCompDefObj.ConsensusSpec, ReplicationSpec: clusterCompDefObj.ReplicationSpec, + RSMSpec: clusterCompDefObj.RSMSpec, PodSpec: clusterCompDefObj.PodSpec, Probes: clusterCompDefObj.Probes, LogConfigs: clusterCompDefObj.LogConfigs, @@ -278,18 +279,17 @@ func buildComponent(reqCtx intctrlutil.RequestCtx, } } - // probe container requires a service account with adequate privileges. - // If probes are required and the serviceAccountName is not set, + buildMonitorConfig(clusterCompDefObj, clusterCompSpec, component) + + // lorry container requires a service account with adequate privileges. + // If lorry required and the serviceAccountName is not set, // a default serviceAccountName will be assigned. 
if component.ServiceAccountName == "" && component.Probes != nil { component.ServiceAccountName = "kb-" + component.ClusterName } - // set component.PodSpec.ServiceAccountName component.PodSpec.ServiceAccountName = component.ServiceAccountName - - buildMonitorConfig(clusterCompDefObj, clusterCompSpec, component) - if err = buildProbeContainers(reqCtx, component); err != nil { + if err = buildLorryContainers(reqCtx, component); err != nil { reqCtx.Log.Error(err, "build probe container failed.") return nil, err } diff --git a/internal/controller/component/component_test.go b/internal/controller/component/component_test.go index 9fb9b84aff8..b2210e1e667 100644 --- a/internal/controller/component/component_test.go +++ b/internal/controller/component/component_test.go @@ -125,7 +125,7 @@ var _ = Describe("component module", func() { nil, &clusterVersion.Spec.ComponentVersions[1]) Expect(err).Should(Succeed()) - Expect(len(component.PodSpec.Containers)).Should(Equal(4)) + Expect(len(component.PodSpec.Containers)).Should(Equal(3)) By("new init container in clusterVersion not in clusterDefinition") component, err = BuildComponent( diff --git a/internal/controller/component/probe_utils.go b/internal/controller/component/probe_utils.go index 6c368a725b8..df1425429ea 100644 --- a/internal/controller/component/probe_utils.go +++ b/internal/controller/component/probe_utils.go @@ -23,7 +23,6 @@ import ( "encoding/json" "fmt" "strconv" - "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -37,8 +36,6 @@ import ( const ( // http://localhost:/v1.0/bindings/ - checkRoleURIFormat = "/v1.0/bindings/%s?operation=checkRole&workloadType=%s" - getGlobalInfoFormat = "/v1.0/bindings/%s?operation=getGlobalInfo" checkRunningURIFormat = "/v1.0/bindings/%s?operation=checkRunning" checkStatusURIFormat = "/v1.0/bindings/%s?operation=checkStatus" volumeProtectionURIFormat = "/v1.0/bindings/%s?operation=volumeProtection" @@ -55,61 +52,60 @@ var ( } ) -func 
buildProbeContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedComponent) error { - container := buildProbeContainer() - probeContainers := []corev1.Container{} - componentProbes := component.Probes - if componentProbes == nil { +func buildLorryContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedComponent) error { + container := buildLorryContainer() + lorryContainers := []corev1.Container{} + componentLorry := component.Probes + if componentLorry == nil { return nil } - reqCtx.Log.V(3).Info("probe", "settings", componentProbes) - probeSvcHTTPPort := viper.GetInt32("PROBE_SERVICE_HTTP_PORT") - probeSvcGRPCPort := viper.GetInt32("PROBE_SERVICE_GRPC_PORT") - availablePorts, err := getAvailableContainerPorts(component.PodSpec.Containers, []int32{probeSvcHTTPPort, probeSvcGRPCPort}) - probeSvcHTTPPort = availablePorts[0] - probeSvcGRPCPort = availablePorts[1] + reqCtx.Log.V(3).Info("lorry", "settings", componentLorry) + lorrySvcHTTPPort := viper.GetInt32("PROBE_SERVICE_HTTP_PORT") + lorrySvcGRPCPort := viper.GetInt32("PROBE_SERVICE_GRPC_PORT") + // override by new env name + if viper.IsSet("LORRY_SERVICE_HTTP_PORT") { + lorrySvcHTTPPort = viper.GetInt32("LORRY_SERVICE_HTTP_PORT") + } + if viper.IsSet("LORRY_SERVICE_GRPC_PORT") { + lorrySvcGRPCPort = viper.GetInt32("LORRY_SERVICE_GRPC_PORT") + } + availablePorts, err := getAvailableContainerPorts(component.PodSpec.Containers, []int32{lorrySvcHTTPPort, lorrySvcGRPCPort}) + lorrySvcHTTPPort = availablePorts[0] + lorrySvcGRPCPort = availablePorts[1] if err != nil { - reqCtx.Log.Info("get probe container port failed", "error", err) + reqCtx.Log.Info("get lorry container port failed", "error", err) return err } - // injectHttp2Shell(component.PodSpec) - - if componentProbes.RoleProbe != nil { - roleChangedContainer := container.DeepCopy() - buildRoleProbeContainer(component, roleChangedContainer, componentProbes.RoleProbe, int(probeSvcHTTPPort), component.PodSpec) - probeContainers = 
append(probeContainers, *roleChangedContainer) - } - - if componentProbes.StatusProbe != nil { + if componentLorry.StatusProbe != nil { statusProbeContainer := container.DeepCopy() - buildStatusProbeContainer(component.CharacterType, statusProbeContainer, componentProbes.StatusProbe, int(probeSvcHTTPPort)) - probeContainers = append(probeContainers, *statusProbeContainer) + buildStatusProbeContainer(component.CharacterType, statusProbeContainer, componentLorry.StatusProbe, int(lorrySvcHTTPPort)) + lorryContainers = append(lorryContainers, *statusProbeContainer) } - if componentProbes.RunningProbe != nil { + if componentLorry.RunningProbe != nil { runningProbeContainer := container.DeepCopy() - buildRunningProbeContainer(component.CharacterType, runningProbeContainer, componentProbes.RunningProbe, int(probeSvcHTTPPort)) - probeContainers = append(probeContainers, *runningProbeContainer) + buildRunningProbeContainer(component.CharacterType, runningProbeContainer, componentLorry.RunningProbe, int(lorrySvcHTTPPort)) + lorryContainers = append(lorryContainers, *runningProbeContainer) } if volumeProtectionEnabled(component) { c := container.DeepCopy() - buildVolumeProtectionProbeContainer(component.CharacterType, c, int(probeSvcHTTPPort)) - probeContainers = append(probeContainers, *c) + buildVolumeProtectionProbeContainer(component.CharacterType, c, int(lorrySvcHTTPPort)) + lorryContainers = append(lorryContainers, *c) } - if len(probeContainers) >= 1 { - container := &probeContainers[0] - buildProbeServiceContainer(component, container, int(probeSvcHTTPPort), int(probeSvcGRPCPort)) + if len(lorryContainers) >= 1 { + container := &lorryContainers[0] + buildLorryServiceContainer(component, container, int(lorrySvcHTTPPort), int(lorrySvcGRPCPort)) } - reqCtx.Log.V(1).Info("probe", "containers", probeContainers) - component.PodSpec.Containers = append(component.PodSpec.Containers, probeContainers...) 
+ reqCtx.Log.V(1).Info("lorry", "containers", lorryContainers) + component.PodSpec.Containers = append(component.PodSpec.Containers, lorryContainers...) return nil } -func buildProbeContainer() *corev1.Container { +func buildLorryContainer() *corev1.Container { return builder.NewContainerBuilder("string"). SetImage("registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"). SetImagePullPolicy(corev1.PullIfNotPresent). @@ -142,10 +138,10 @@ func buildProbeContainer() *corev1.Container { GetObject() } -func buildProbeServiceContainer(component *SynthesizedComponent, container *corev1.Container, probeSvcHTTPPort int, probeSvcGRPCPort int) { +func buildLorryServiceContainer(component *SynthesizedComponent, container *corev1.Container, probeSvcHTTPPort int, probeSvcGRPCPort int) { container.Image = viper.GetString(constant.KBToolsImage) container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) - container.Command = []string{"probe", + container.Command = []string{"lorry", "--port", strconv.Itoa(probeSvcHTTPPort)} if len(component.PodSpec.Containers) > 0 { @@ -180,14 +176,6 @@ func buildProbeServiceContainer(component *SynthesizedComponent, container *core } } - roles := getComponentRoles(component) - rolesJSON, _ := json.Marshal(roles) - container.Env = append(container.Env, corev1.EnvVar{ - Name: constant.KBEnvServiceRoles, - Value: string(rolesJSON), - ValueFrom: nil, - }) - container.Env = append(container.Env, corev1.EnvVar{ Name: constant.KBEnvCharacterType, Value: component.CharacterType, @@ -200,12 +188,6 @@ func buildProbeServiceContainer(component *SynthesizedComponent, container *core ValueFrom: nil, }) - container.Env = append(container.Env, corev1.EnvVar{ - Name: "KB_RSM_ACTION_SVC_LIST", - Value: viper.GetString("KB_RSM_ACTION_SVC_LIST"), - ValueFrom: nil, - }) - container.Ports = []corev1.ContainerPort{ { ContainerPort: int32(probeSvcHTTPPort), @@ -218,66 +200,12 @@ func buildProbeServiceContainer(component 
*SynthesizedComponent, container *core Protocol: "TCP", }} - // pass the volume protection spec to probe container through env. + // pass the volume protection spec to lorry container through env. if volumeProtectionEnabled(component) { container.Env = append(container.Env, env4VolumeProtection(*component.VolumeProtection)) } } -func getComponentRoles(component *SynthesizedComponent) map[string]string { - var roles = map[string]string{} - if component.ConsensusSpec == nil { - return roles - } - - consensus := component.ConsensusSpec - roles[strings.ToLower(consensus.Leader.Name)] = string(consensus.Leader.AccessMode) - for _, follower := range consensus.Followers { - roles[strings.ToLower(follower.Name)] = string(follower.AccessMode) - } - if consensus.Learner != nil { - roles[strings.ToLower(consensus.Learner.Name)] = string(consensus.Learner.AccessMode) - } - return roles -} - -func buildRoleProbeContainer(component *SynthesizedComponent, roleChangedContainer *corev1.Container, - probeSetting *appsv1alpha1.ClusterDefinitionProbe, probeSvcHTTPPort int, pod *corev1.PodSpec) { - roleChangedContainer.Name = constant.RoleProbeContainerName - probe := roleChangedContainer.ReadinessProbe - bindingType := strings.ToLower(component.CharacterType) - workloadType := component.WorkloadType - httpGet := &corev1.HTTPGetAction{} - httpGet.Path = fmt.Sprintf(checkRoleURIFormat, bindingType, workloadType) - httpGet.Port = intstr.FromInt(probeSvcHTTPPort) - probe.Exec = nil - probe.HTTPGet = httpGet - probe.PeriodSeconds = probeSetting.PeriodSeconds - probe.TimeoutSeconds = probeSetting.TimeoutSeconds - probe.FailureThreshold = probeSetting.FailureThreshold - roleChangedContainer.StartupProbe.TCPSocket.Port = intstr.FromInt(probeSvcHTTPPort) - - // -> uncomment it to enable snapshot to cluster - - // base := probeSvcHTTPPort + 2 - // portNeeded := len(probeSetting.Actions) - // activePorts := make([]int32, portNeeded) - // for i := 0; i < portNeeded; i++ { - // activePorts[i] = 
int32(base + i) - // } - // activePorts, err := getAvailableContainerPorts(pod.Containers, activePorts) - // if err != nil { - // return - // } - // marshal, err := json.Marshal(activePorts) - // if err != nil { - // return - // } - // viper.Set("KB_RSM_ACTION_SVC_LIST", string(marshal)) - - // injectProbeUtilImages(pod, probeSetting, activePorts, "/role", "checkrole", roleChangedContainer.Env) -} - func buildStatusProbeContainer(characterType string, statusProbeContainer *corev1.Container, probeSetting *appsv1alpha1.ClusterDefinitionProbe, probeSvcHTTPPort int) { statusProbeContainer.Name = constant.StatusProbeContainerName @@ -336,72 +264,3 @@ func env4VolumeProtection(spec appsv1alpha1.VolumeProtectionSpec) corev1.EnvVar Value: string(value), } } - -// func injectHttp2Shell(pod *corev1.PodSpec) { -// // inject shared volume -// agentVolume := corev1.Volume{ -// Name: constant.ProbeAgentMountName, -// VolumeSource: corev1.VolumeSource{ -// EmptyDir: &corev1.EmptyDirVolumeSource{}, -// }, -// } -// pod.Volumes = append(pod.Volumes, agentVolume) -// -// // inject shell2http -// volumeMount := corev1.VolumeMount{ -// Name: constant.ProbeAgentMountName, -// MountPath: constant.ProbeAgentMountPath, -// } -// binPath := strings.Join([]string{constant.ProbeAgentMountPath, constant.ProbeAgent}, "/") -// initContainer := corev1.Container{ -// Name: constant.ProbeAgent, -// Image: constant.ProbeAgentImage, -// ImagePullPolicy: corev1.PullIfNotPresent, -// VolumeMounts: []corev1.VolumeMount{volumeMount}, -// Command: []string{ -// "cp", -// constant.OriginBinaryPath, -// binPath, -// }, -// } -// pod.InitContainers = append(pod.InitContainers, initContainer) -//} -// -// func injectProbeUtilImages(pod *corev1.PodSpec, probeSetting *appsv1alpha1.ClusterDefinitionProbe, -// port []int32, path, usage string, -// credentialEnv []corev1.EnvVar) { -// // todo: uncomment to enable new lorry way -// // actions := probeSetting.Actions -// // volumeMount := corev1.VolumeMount{ -// // 
Name: constant.ProbeAgentMountName, -// // MountPath: constant.ProbeAgentMountPath, -// // } -// // binPath := strings.Join([]string{constant.ProbeAgentMountPath, constant.ProbeAgent}, "/") -// // -// // for i, action := range actions { -// // image := action.Image -// // if len(action.Image) == 0 { -// // image = constant.DefaultActionImage -// // } -// // -// // command := []string{ -// // binPath, -// // "-port", fmt.Sprintf("%d", port[i]), -// // "-export-all-vars", -// // "-form", -// // path, -// // strings.Join(action.Command, " "), -// // } -// // -// // container := corev1.Container{ -// // Name: fmt.Sprintf("%s-action-%d", usage, i), -// // Image: image, -// // ImagePullPolicy: corev1.PullIfNotPresent, -// // VolumeMounts: []corev1.VolumeMount{volumeMount}, -// // Env: credentialEnv, -// // Command: command, -// // } -// // -// // pod.Containers = append(pod.Containers, container) -// // } -// } diff --git a/internal/controller/component/probe_utils_test.go b/internal/controller/component/probe_utils_test.go index 64315a5fecd..9b6b7a67e4a 100644 --- a/internal/controller/component/probe_utils_test.go +++ b/internal/controller/component/probe_utils_test.go @@ -44,7 +44,7 @@ var _ = Describe("probe_utils", func() { var clusterDefProbe *appsv1alpha1.ClusterDefinitionProbe BeforeEach(func() { - container = buildProbeContainer() + container = buildLorryContainer() probeServiceHTTPPort, probeServiceGrpcPort = 3501, 50001 clusterDefProbe = &appsv1alpha1.ClusterDefinitionProbe{} @@ -93,20 +93,13 @@ var _ = Describe("probe_utils", func() { Ctx: ctx, Log: logger, } - Expect(buildProbeContainers(reqCtx, component)).Should(Succeed()) - Expect(len(component.PodSpec.Containers)).Should(Equal(3)) + Expect(buildLorryContainers(reqCtx, component)).Should(Succeed()) + Expect(len(component.PodSpec.Containers)).Should(Equal(2)) Expect(component.PodSpec.Containers[0].Command).ShouldNot(BeEmpty()) }) - It("should build role changed probe container", func() { - 
synthesizedComponent := &SynthesizedComponent{CharacterType: "wesql"} - pod := &corev1.PodSpec{} - buildRoleProbeContainer(synthesizedComponent, container, clusterDefProbe, probeServiceHTTPPort, pod) - Expect(container.ReadinessProbe.HTTPGet).ShouldNot(BeNil()) - }) - It("should build role service container", func() { - buildProbeServiceContainer(component, container, probeServiceHTTPPort, probeServiceGrpcPort) + buildLorryServiceContainer(component, container, probeServiceHTTPPort, probeServiceGrpcPort) Expect(container.Command).ShouldNot(BeEmpty()) }) @@ -138,8 +131,8 @@ var _ = Describe("probe_utils", func() { }, }, } - Expect(buildProbeContainers(reqCtx, component)).Should(Succeed()) - Expect(len(component.PodSpec.Containers)).Should(Equal(4)) + Expect(buildLorryContainers(reqCtx, component)).Should(Succeed()) + Expect(len(component.PodSpec.Containers)).Should(Equal(3)) }) It("build volume protection probe container with RBAC", func() { @@ -161,8 +154,8 @@ var _ = Describe("probe_utils", func() { }, } viper.SetDefault(constant.EnableRBACManager, true) - Expect(buildProbeContainers(reqCtx, component)).Should(Succeed()) - Expect(len(component.PodSpec.Containers)).Should(Equal(4)) + Expect(buildLorryContainers(reqCtx, component)).Should(Succeed()) + Expect(len(component.PodSpec.Containers)).Should(Equal(3)) spec := &appsv1alpha1.VolumeProtectionSpec{} for _, e := range component.PodSpec.Containers[0].Env { if e.Name == constant.KBEnvVolumeProtectionSpec { diff --git a/internal/controller/component/type.go b/internal/controller/component/type.go index d5691697c72..58dd1a9d713 100644 --- a/internal/controller/component/type.go +++ b/internal/controller/component/type.go @@ -47,6 +47,7 @@ type SynthesizedComponent struct { StatefulSpec *v1alpha1.StatefulSetSpec `json:"statefulSpec,omitempty"` ConsensusSpec *v1alpha1.ConsensusSetSpec `json:"consensusSpec,omitempty"` ReplicationSpec *v1alpha1.ReplicationSetSpec `json:"replicationSpec,omitempty"` + RSMSpec 
*v1alpha1.RSMSpec `json:"rsmSpec,omitempty"` PodSpec *corev1.PodSpec `json:"podSpec,omitempty"` Services []corev1.Service `json:"services,omitempty"` Probes *v1alpha1.ClusterDefinitionProbes `json:"probes,omitempty"` diff --git a/internal/controller/factory/builder.go b/internal/controller/factory/builder.go index 716627c19e1..e43ec13f932 100644 --- a/internal/controller/factory/builder.go +++ b/internal/controller/factory/builder.go @@ -593,6 +593,10 @@ func separateServices(services []corev1.Service) (*corev1.Service, []corev1.Serv } func buildRoleInfo(component *component.SynthesizedComponent) ([]workloads.ReplicaRole, *workloads.RoleProbe, *workloads.MembershipReconfiguration, *workloads.MemberUpdateStrategy) { + if component.RSMSpec != nil { + return buildRoleInfo2(component) + } + var ( roles []workloads.ReplicaRole probe *workloads.RoleProbe @@ -609,6 +613,7 @@ func buildRoleInfo(component *component.SynthesizedComponent) ([]workloads.Repli probe.FailureThreshold = roleProbe.FailureThreshold // set to default value probe.SuccessThreshold = 1 + probe.RoleUpdateMechanism = workloads.DirectAPIServerEventUpdate } // TODO(free6om): set default reconfiguration actions after relative addon refactored @@ -627,6 +632,11 @@ func buildRoleInfo(component *component.SynthesizedComponent) ([]workloads.Repli return roles, probe, reconfiguration, strategy } +func buildRoleInfo2(component *component.SynthesizedComponent) ([]workloads.ReplicaRole, *workloads.RoleProbe, *workloads.MembershipReconfiguration, *workloads.MemberUpdateStrategy) { + rsmSpec := component.RSMSpec + return rsmSpec.Roles, rsmSpec.RoleProbe, rsmSpec.MembershipReconfiguration, rsmSpec.MemberUpdateStrategy +} + func buildRoleInfoFromReplication() []workloads.ReplicaRole { return []workloads.ReplicaRole{ { diff --git a/internal/controller/plan/prepare_test.go b/internal/controller/plan/prepare_test.go index 21936d97059..6f013e73304 100644 --- a/internal/controller/plan/prepare_test.go +++ 
b/internal/controller/plan/prepare_test.go @@ -450,7 +450,7 @@ var _ = Describe("Cluster Controller", func() { if isStatefulSet(v) { sts := resources[i].(*appsv1.StatefulSet) podSpec := sts.Spec.Template.Spec - Expect(len(podSpec.Containers)).Should(Equal(4)) + Expect(len(podSpec.Containers)).Should(Equal(3)) } } originPodSpec := clusterDef.Spec.ComponentDefs[0].PodSpec diff --git a/internal/controller/rsm/pod_role_event_handler.go b/internal/controller/rsm/pod_role_event_handler.go index 6af517052cd..63c3f0bd440 100644 --- a/internal/controller/rsm/pod_role_event_handler.go +++ b/internal/controller/rsm/pod_role_event_handler.go @@ -24,6 +24,7 @@ import ( "fmt" "regexp" "strings" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -31,6 +32,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/common" + "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -42,6 +45,11 @@ type PodRoleEventHandler struct{} // probeEventType defines the type of probe event. 
type probeEventType string +const ( + successEvent = "Success" + roleChangedEvent = "roleChanged" +) + type probeMessage struct { Event probeEventType `json:"event,omitempty"` Message string `json:"message,omitempty"` @@ -57,7 +65,10 @@ const ( var roleMessageRegex = regexp.MustCompile(`Readiness probe failed: .*({.*})`) func (h *PodRoleEventHandler) Handle(cli client.Client, reqCtx intctrlutil.RequestCtx, recorder record.EventRecorder, event *corev1.Event) error { - if event.InvolvedObject.FieldPath != roleProbeEventFieldPath { + if event.InvolvedObject.FieldPath != readinessProbeEventFieldPath && + event.InvolvedObject.FieldPath != directAPIServerEventFieldPath && + event.InvolvedObject.FieldPath != legacyEventFieldPath && + event.Reason != checkRoleEventReason { return nil } var ( @@ -93,50 +104,95 @@ func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, re } // if probe event operation is not impl, check role failed or role invalid, ignore it - if message.Event != "Success" { + if message.Event != successEvent && message.Event != roleChangedEvent { reqCtx.Log.Info("probe event failed", "message", message.Message) return "", nil } role := strings.ToLower(message.Role) - podName := types.NamespacedName{ - Namespace: event.InvolvedObject.Namespace, - Name: event.InvolvedObject.Name, + snapshot := parseGlobalRoleSnapshot(role, event) + for _, pair := range snapshot.PodRoleNamePairs { + podName := types.NamespacedName{ + Namespace: event.InvolvedObject.Namespace, + Name: pair.PodName, + } + // get pod + pod := &corev1.Pod{} + if err := cli.Get(reqCtx.Ctx, podName, pod); err != nil { + return pair.RoleName, err + } + // event belongs to old pod with the same name, ignore it + if pod.Name == pair.PodName && pod.UID != event.InvolvedObject.UID { + return pair.RoleName, nil + } + + // compare the version of the current role snapshot with the last version recorded in the pod annotation, + // stale role snapshot will be ignored. 
+ lastSnapshotVersion, ok := pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] + if ok { + + if snapshot.Version <= lastSnapshotVersion { + reqCtx.Log.Info("stale role snapshot received, ignore it", "snapshot", snapshot) + return pair.RoleName, nil + } + } + + name, _ := intctrlutil.GetParentNameAndOrdinal(pod) + rsm := &workloads.ReplicatedStateMachine{} + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: pod.Namespace, Name: name}, rsm); err != nil { + return "", err + } + reqCtx.Log.V(1).Info("handle role change event", "pod", pod.Name, "role", role, "originalRole", message.OriginalRole) + + if err := updatePodRoleLabel(cli, reqCtx, *rsm, pod, pair.RoleName, snapshot.Version); err != nil { + return "", err + } } - // get pod - pod := &corev1.Pod{} - if err := cli.Get(reqCtx.Ctx, podName, pod); err != nil { - return role, err - } - // event belongs to old pod with the same name, ignore it - if pod.UID != event.InvolvedObject.UID { - return role, nil + return role, nil +} + +func parseGlobalRoleSnapshot(role string, event *corev1.Event) *common.GlobalRoleSnapshot { + snapshot := &common.GlobalRoleSnapshot{} + if err := json.Unmarshal([]byte(role), snapshot); err == nil { + return snapshot } - name, _ := intctrlutil.GetParentNameAndOrdinal(pod) - rsm := &workloads.ReplicatedStateMachine{} - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: pod.Namespace, Name: name}, rsm); err != nil { - return "", err + snapshot.Version = event.EventTime.Time.Format(time.RFC3339Nano) + pair := common.PodRoleNamePair{ + PodName: event.InvolvedObject.Name, + RoleName: role, } - reqCtx.Log.V(1).Info("handle role change event", "pod", pod.Name, "role", role, "originalRole", message.OriginalRole) - - return role, updatePodRoleLabel(cli, reqCtx, *rsm, pod, role) + snapshot.PodRoleNamePairs = append(snapshot.PodRoleNamePairs, pair) + return snapshot } // parseProbeEventMessage parses probe event message. 
func parseProbeEventMessage(reqCtx intctrlutil.RequestCtx, event *corev1.Event) *probeMessage { message := &probeMessage{} - matches := roleMessageRegex.FindStringSubmatch(event.Message) - if len(matches) != 2 { - reqCtx.Log.Info("parser Readiness probe event message failed", "message", event.Message) - return nil + tryUnmarshalDirectAPIServerEvent := func() error { + return json.Unmarshal([]byte(event.Message), message) } - msg := matches[1] - err := json.Unmarshal([]byte(msg), message) - if err != nil { - // not role related message, ignore it - reqCtx.Log.Info("not role message", "message", event.Message, "error", err) + tryUnmarshalReadinessProbeEvent := func() error { + matches := roleMessageRegex.FindStringSubmatch(event.Message) + if len(matches) != 2 { + reqCtx.Log.Info("parser Readiness probe event message failed", "message", event.Message) + return fmt.Errorf("parser Readiness probe event message failed: %s", event.Message) + } + msg := matches[1] + err := json.Unmarshal([]byte(msg), message) + if err != nil { + // not role related message, ignore it + reqCtx.Log.Info("not role message", "message", event.Message, "error", err) + return err + } return nil } - return message + + if err := tryUnmarshalDirectAPIServerEvent(); err == nil { + return message + } + if err := tryUnmarshalReadinessProbeEvent(); err == nil { + return message + } + return nil } diff --git a/internal/controller/rsm/pod_role_event_handler_test.go b/internal/controller/rsm/pod_role_event_handler_test.go index f062077c5de..abfee5d5ce7 100644 --- a/internal/controller/rsm/pod_role_event_handler_test.go +++ b/internal/controller/rsm/pod_role_event_handler_test.go @@ -52,7 +52,7 @@ var _ = Describe("pod role label event handler test", func() { Namespace: pod.Namespace, Name: pod.Name, UID: pod.UID, - FieldPath: roleProbeEventFieldPath, + FieldPath: readinessProbeEventFieldPath, } role := workloads.ReplicaRole{ Name: "leader", diff --git 
a/internal/controller/rsm/transformer_object_generation.go b/internal/controller/rsm/transformer_object_generation.go index 96a7f28a117..f7403abc923 100644 --- a/internal/controller/rsm/transformer_object_generation.go +++ b/internal/controller/rsm/transformer_object_generation.go @@ -401,7 +401,6 @@ func injectRoleProbeAgentContainer(rsm workloads.ReplicatedStateMachine, templat if probeDaemonPort == 0 { probeDaemonPort = defaultRoleProbeDaemonPort } - roleProbeURI := fmt.Sprintf(roleProbeURIFormat, strconv.Itoa(probeDaemonPort)) env := credentialEnv env = append(env, corev1.EnvVar{ @@ -437,13 +436,56 @@ func injectRoleProbeAgentContainer(rsm workloads.ReplicatedStateMachine, templat }) } + // inject role update mechanism env + env = append(env, + corev1.EnvVar{ + Name: RoleUpdateMechanismVarName, + Value: string(roleProbe.RoleUpdateMechanism), + }) + + // lorry related envs + env = append(env, + corev1.EnvVar{ + Name: constant.KBEnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + corev1.EnvVar{ + Name: constant.KBEnvNamespace, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + corev1.EnvVar{ + Name: constant.KBEnvPodUID, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + corev1.EnvVar{ + Name: constant.KBEnvNodeName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + ) + // build container container := corev1.Container{ Name: roleProbeName, Image: image, ImagePullPolicy: "IfNotPresent", Command: []string{ - "role-agent", + "lorry", "--port", strconv.Itoa(probeDaemonPort), }, Ports: []corev1.ContainerPort{{ @@ -453,11 +495,9 @@ func injectRoleProbeAgentContainer(rsm workloads.ReplicatedStateMachine, templat }}, ReadinessProbe: &corev1.Probe{ ProbeHandler: 
corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{ - "/bin/grpc_health_probe", - roleProbeURI, - }, + HTTPGet: &corev1.HTTPGetAction{ + Path: roleProbeURI, + Port: intstr.FromInt(probeDaemonPort), }, }, InitialDelaySeconds: roleProbe.InitialDelaySeconds, diff --git a/internal/controller/rsm/types.go b/internal/controller/rsm/types.go index 1e769aed740..aaf799b83d2 100644 --- a/internal/controller/rsm/types.go +++ b/internal/controller/rsm/types.go @@ -69,26 +69,31 @@ const ( jobScenarioMembership = "membership-reconfiguration" jobScenarioUpdate = "pod-update" - roleProbeName = "role-observe" - roleAgentVolumeName = "role-agent" - roleAgentInstallerName = "role-agent-installer" - roleAgentVolumeMountPath = "/role-probe" - roleAgentName = "agent" - shell2httpImage = "msoap/shell2http:1.16.0" - shell2httpBinaryPath = "/app/shell2http" - shell2httpServePath = "/role" - defaultRoleProbeAgentImage = "apecloud/kubeblocks-role-agent:latest" - defaultRoleProbeDaemonPort = 7373 - roleProbeURIFormat = "-addr=localhost:%s" - defaultActionImage = "busybox:latest" - usernameCredentialVarName = "KB_RSM_USERNAME" - passwordCredentialVarName = "KB_RSM_PASSWORD" - servicePortVarName = "KB_RSM_SERVICE_PORT" - actionSvcListVarName = "KB_RSM_ACTION_SVC_LIST" - leaderHostVarName = "KB_RSM_LEADER_HOST" - targetHostVarName = "KB_RSM_TARGET_HOST" - roleProbeEventFieldPath = "spec.containers{" + roleProbeName + "}" - actionSvcPortBase = int32(36500) + roleProbeName = "kb-role-probe" + roleAgentVolumeName = "role-agent" + roleAgentInstallerName = "role-agent-installer" + roleAgentVolumeMountPath = "/role-probe" + roleAgentName = "agent" + shell2httpImage = "msoap/shell2http:1.16.0" + shell2httpBinaryPath = "/app/shell2http" + shell2httpServePath = "/role" + defaultRoleProbeAgentImage = "apecloud/kubeblocks-tools:latest" + defaultRoleProbeDaemonPort = 7373 + roleProbeURI = "/v1.0/bindings/custom?operation=checkRole" + defaultActionImage = "busybox:latest" + 
usernameCredentialVarName = "KB_RSM_USERNAME" + passwordCredentialVarName = "KB_RSM_PASSWORD" + servicePortVarName = "KB_RSM_SERVICE_PORT" + actionSvcListVarName = "KB_RSM_ACTION_SVC_LIST" + leaderHostVarName = "KB_RSM_LEADER_HOST" + targetHostVarName = "KB_RSM_TARGET_HOST" + RoleUpdateMechanismVarName = "KB_RSM_ROLE_UPDATE_MECHANISM" + directAPIServerEventFieldPath = "spec.containers{sqlchannel}" + readinessProbeEventFieldPath = "spec.containers{" + roleProbeName + "}" + legacyEventFieldPath = "spec.containers{kb-checkrole}" + checkRoleEventReason = "checkRole" + + actionSvcPortBase = int32(36500) ) type rsmTransformContext struct { diff --git a/internal/controller/rsm/utils.go b/internal/controller/rsm/utils.go index 15cdb802c8e..7b54b83a964 100644 --- a/internal/controller/rsm/utils.go +++ b/internal/controller/rsm/utils.go @@ -134,10 +134,8 @@ func composeRolePriorityMap(rsm workloads.ReplicatedStateMachine) map[string]int } // updatePodRoleLabel updates pod role label when internal container role changed -func updatePodRoleLabel(cli client.Client, - reqCtx intctrlutil.RequestCtx, - rsm workloads.ReplicatedStateMachine, - pod *corev1.Pod, roleName string) error { +func updatePodRoleLabel(cli client.Client, reqCtx intctrlutil.RequestCtx, + rsm workloads.ReplicatedStateMachine, pod *corev1.Pod, roleName string, version string) error { ctx := reqCtx.Ctx roleMap := composeRoleMap(rsm) // role not defined in CR, ignore it @@ -154,6 +152,11 @@ func updatePodRoleLabel(cli client.Client, delete(pod.Labels, roleLabelKey) delete(pod.Labels, rsmAccessModeLabelKey) } + + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] = version return cli.Patch(ctx, pod, patch) } @@ -170,9 +173,6 @@ func setMembersStatus(rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) newMembersStatus := make([]workloads.MemberStatus, 0) roleMap := composeRoleMap(*rsm) for _, pod := range pods { - if 
intctrlutil.GetPodRevision(&pod) != rsm.Status.UpdateRevision { - continue - } if !intctrlutil.PodIsReadyWithLabel(pod) { continue } @@ -524,14 +524,16 @@ func getLabels(rsm *workloads.ReplicatedStateMachine) map[string]string { } func getSvcSelector(rsm *workloads.ReplicatedStateMachine, headless bool) map[string]string { - var leader *workloads.ReplicaRole - for _, role := range rsm.Spec.Roles { - if role.IsLeader && len(role.Name) > 0 { - leader = &role - break + selectors := make(map[string]string, 0) + + if !headless { + for _, role := range rsm.Spec.Roles { + if role.IsLeader && len(role.Name) > 0 { + selectors[constant.RoleLabelKey] = role.Name + break + } } } - selectors := make(map[string]string, 0) if viper.GetBool(FeatureGateRSMCompatibilityMode) { keys := []string{ @@ -544,18 +546,12 @@ func getSvcSelector(rsm *workloads.ReplicatedStateMachine, headless bool) map[st selectors[key] = value } } - if leader != nil && !headless { - selectors[constant.RoleLabelKey] = leader.Name - } return selectors } for k, v := range rsm.Spec.Selector.MatchLabels { selectors[k] = v } - if leader != nil && !headless { - selectors[rsmAccessModeLabelKey] = string(leader.AccessMode) - } return selectors } diff --git a/internal/controllerutil/pod_utils.go b/internal/controllerutil/pod_utils.go index 4e81d0086ce..842db70d8bd 100644 --- a/internal/controllerutil/pod_utils.go +++ b/internal/controllerutil/pod_utils.go @@ -34,6 +34,7 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" + viper "github.com/apecloud/kubeblocks/internal/viperx" ) // statefulPodRegex is a regular expression that extracts the parent StatefulSet and ordinal from the Name of a Pod @@ -357,12 +358,13 @@ func GetProbeHTTPPort(pod *corev1.Pod) (int32, error) { // GetProbeContainerName gets the probe container from pod func GetProbeContainerName(pod *corev1.Pod) (string, error) { + lorryImage := viper.GetString(constant.KBToolsImage) 
for _, container := range pod.Spec.Containers { - if container.Name == constant.RoleProbeContainerName { - return constant.RoleProbeContainerName, nil + if container.Image == lorryImage { + return container.Name, nil } } - return "", fmt.Errorf("container %s not found", constant.RoleProbeContainerName) + return "", fmt.Errorf("container %s not found", lorryImage) } diff --git a/lorry/binding/base.go b/lorry/binding/base.go index 88203da0aa6..d28d3465ac0 100644 --- a/lorry/binding/base.go +++ b/lorry/binding/base.go @@ -41,13 +41,6 @@ type Operation func(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) type OpsResult map[string]interface{} -type GlobalInfo struct { - Event string `json:"event,omitempty"` - Term int `json:"term,omitempty"` - PodName2Role map[string]string `json:"map,omitempty"` - Message string `json:"message,omitempty"` -} - // AccessMode defines SVC access mode enums. // +enum type AccessMode string @@ -76,14 +69,12 @@ type BaseOperations struct { DBAddress string DBType string OriRole string - OriGlobalInfo *GlobalInfo DBRoles map[string]AccessMode Logger logr.Logger Metadata map[string]string InitIfNeed func() bool Manager component.DBManager GetRole func(context.Context, *ProbeRequest, *ProbeResponse) (string, error) - GetGlobalInfo func(ctx context.Context, request *ProbeRequest, response *ProbeResponse) (GlobalInfo, error) OperationsMap map[OperationKind]Operation } @@ -204,6 +195,7 @@ func (ops *BaseOperations) Invoke(ctx context.Context, req *ProbeRequest) (*Prob if err != nil { return nil, err } + ops.Logger.Info("operation called", "operation", req.Operation, "result", opsRes) if opsRes != nil { res, _ := json.Marshal(opsRes) resp.Data = res @@ -235,7 +227,7 @@ func (ops *BaseOperations) CheckRoleOps(ctx context.Context, req *ProbeRequest, opsRes["message"] = err.Error() if ops.CheckRoleFailedCount%ops.FailedEventReportFrequency == 0 { ops.Logger.Info("role checks failed continuously", "times", ops.CheckRoleFailedCount) - 
SentProbeEvent(ctx, opsRes, ops.Logger) + SentProbeEvent(ctx, opsRes, resp, ops.Logger) } ops.CheckRoleFailedCount++ return opsRes, nil @@ -252,7 +244,7 @@ func (ops *BaseOperations) CheckRoleOps(ctx context.Context, req *ProbeRequest, opsRes["role"] = role if ops.OriRole != role { ops.OriRole = role - SentProbeEvent(ctx, opsRes, ops.Logger) + SentProbeEvent(ctx, opsRes, resp, ops.Logger) } // RoleUnchangedCount is the count of consecutive role unchanged checks. @@ -294,51 +286,6 @@ func (ops *BaseOperations) GetRoleOps(ctx context.Context, req *ProbeRequest, re return opsRes, nil } -func (ops *BaseOperations) GetGlobalInfoOps(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (OpsResult, error) { - opsRes := OpsResult{} - opsRes["operation"] = GetGlobalInfoOperation - if ops.GetGlobalInfo == nil { - message := fmt.Sprintf("getGlobalInfo operation is not implemented for %v", ops.DBType) - ops.Logger.Error(fmt.Errorf("not implemented"), message) - opsRes["event"] = OperationNotImplemented - opsRes["message"] = message - resp.Metadata[StatusCode] = OperationNotFoundHTTPCode - return opsRes, nil - } - - globalInfo, err := ops.GetGlobalInfo(ctx, req, resp) - if err != nil { - ops.Logger.Error(err, "error executing GlobalInfo") - opsRes["event"] = OperationFailed - opsRes["message"] = err.Error() - if ops.CheckRoleFailedCount%ops.FailedEventReportFrequency == 0 { - ops.Logger.Info("getRole failed continuously", "failed times", ops.CheckRoleFailedCount) - SentProbeEvent(ctx, opsRes, ops.Logger) - } - // just reuse the checkRoleFailCount temporarily - ops.CheckRoleFailedCount++ - return opsRes, nil - } - - ops.CheckRoleFailedCount = 0 - - for _, role := range globalInfo.PodName2Role { - if isValid, message := ops.roleValidate(role); !isValid { - opsRes["event"] = OperationInvalid - opsRes["message"] = message - return opsRes, nil - } - } - - globalInfo.Transform(opsRes) - if ops.OriGlobalInfo == nil || globalInfo.ShouldUpdate(*ops.OriGlobalInfo) { - 
ops.OriGlobalInfo = &globalInfo - SentProbeEvent(ctx, opsRes, ops.Logger) - } - - return opsRes, nil -} - // Component may have some internal roles that needn't be exposed to end user, // and not configured in cluster definition, e.g. ETCD's Candidate. // roleValidate is used to filter the internal roles and decrease the number @@ -381,7 +328,7 @@ func (ops *BaseOperations) CheckRunningOps(ctx context.Context, req *ProbeReques if ops.CheckRunningFailedCount%ops.FailedEventReportFrequency == 0 { ops.Logger.Info("running checks failed continuously", "times", ops.CheckRunningFailedCount) // resp.Metadata[StatusCode] = OperationFailedHTTPCode - SentProbeEvent(ctx, opsRes, ops.Logger) + SentProbeEvent(ctx, opsRes, resp, ops.Logger) } ops.CheckRunningFailedCount++ return opsRes, nil @@ -549,29 +496,3 @@ func (ops *BaseOperations) LeaveMemberOps(ctx context.Context, req *ProbeRequest opsRes["message"] = "left of the current member is complete" return opsRes, nil } - -func (g *GlobalInfo) ShouldUpdate(another GlobalInfo) bool { - if g.Term != another.Term { - return g.Term < another.Term - } - if g.Message != another.Message || g.Event != another.Event { - return true - } - for k, v := range g.PodName2Role { - if s, ok := another.PodName2Role[k]; ok { - if s != v { - return true - } - } else { - return true - } - } - return false -} - -func (g *GlobalInfo) Transform(result OpsResult) { - result["event"] = g.Event - result["term"] = g.Term - result["message"] = g.Message - result["map"] = g.PodName2Role -} diff --git a/lorry/binding/custom/custom.go b/lorry/binding/custom/custom.go index 5f87868e169..0de70843e22 100644 --- a/lorry/binding/custom/custom.go +++ b/lorry/binding/custom/custom.go @@ -27,12 +27,13 @@ import ( "net" "net/http" "net/url" - "strconv" + "regexp" "strings" "time" ctrl "sigs.k8s.io/controller-runtime" + "github.com/apecloud/kubeblocks/internal/common" viper "github.com/apecloud/kubeblocks/internal/viperx" . 
"github.com/apecloud/kubeblocks/lorry/binding" "github.com/apecloud/kubeblocks/lorry/component" @@ -46,6 +47,8 @@ type HTTPCustom struct { BaseOperations } +var perNodeRegx = regexp.MustCompile("[a-zA-Z0-9]+") + // NewHTTPCustom returns a new HTTPCustom. func NewHTTPCustom() *HTTPCustom { logger := ctrl.Log.WithName("Custom") @@ -81,9 +84,7 @@ func (h *HTTPCustom) Init(metadata component.Properties) error { h.BaseOperations.Init(metadata) h.BaseOperations.GetRole = h.GetRole - h.BaseOperations.GetGlobalInfo = h.GetGlobalInfo h.OperationsMap[CheckRoleOperation] = h.CheckRoleOps - h.OperationsMap[GetGlobalInfoOperation] = h.GetGlobalInfoOps return nil } @@ -99,73 +100,49 @@ func (h *HTTPCustom) GetRole(ctx context.Context, req *ProbeRequest, resp *Probe ) for _, port := range *h.actionSvcPorts { - u := fmt.Sprintf("http://127.0.0.1:%d/role?KB_CONSENSUS_SET_LAST_STDOUT=%s", port, url.QueryEscape(string(lastOutput))) + u := fmt.Sprintf("http://127.0.0.1:%d/role?KB_RSM_LAST_STDOUT=%s", port, url.QueryEscape(string(lastOutput))) lastOutput, err = h.callAction(ctx, u) if err != nil { return "", err } + h.Logger.Info("action succeed", "url", u, "output", string(lastOutput)) } + finalOutput := strings.TrimSpace(string(lastOutput)) - return string(lastOutput), nil -} - -func (h *HTTPCustom) GetRoleOps(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (OpsResult, error) { - role, err := h.GetRole(ctx, req, resp) - if err != nil { - return nil, err - } - opsRes := OpsResult{} - opsRes["role"] = role - return opsRes, nil -} - -func (h *HTTPCustom) GetGlobalInfo(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (GlobalInfo, error) { - if h.actionSvcPorts == nil { - return GlobalInfo{}, nil + if perNodeRegx.MatchString(finalOutput) { + return finalOutput, nil } - var ( - lastOutput []byte - err error - ) - - for _, port := range *h.actionSvcPorts { - u := fmt.Sprintf("http://127.0.0.1:%d/role?KB_CONSENSUS_SET_LAST_STDOUT=%s", port, 
url.QueryEscape(string(lastOutput))) - lastOutput, err = h.callAction(ctx, u) - if err != nil { - return GlobalInfo{}, err - } - } - - // csv format: term,podname,role - parseCSV := func(input []byte) (GlobalInfo, error) { - res := GlobalInfo{PodName2Role: map[string]string{}} - str := string(input) - lines := strings.Split(str, "\n") + // csv format: term,podName,role + parseCSV := func(input string) (string, error) { + res := common.GlobalRoleSnapshot{} + lines := strings.Split(input, "\n") for _, line := range lines { - fields := strings.Split(line, ",") + fields := strings.Split(strings.TrimSpace(line), ",") if len(fields) != 3 { - return res, err + return "", err } - res.Term, err = strconv.Atoi(fields[0]) - if err != nil { - return res, err + res.Version = strings.TrimSpace(fields[0]) + pair := common.PodRoleNamePair{ + PodName: strings.TrimSpace(fields[1]), + RoleName: strings.ToLower(strings.TrimSpace(fields[2])), } - k := fields[1] - v := fields[2] - res.PodName2Role[k] = v + res.PodRoleNamePairs = append(res.PodRoleNamePairs, pair) } - return res, nil + resByte, err := json.Marshal(res) + return string(resByte), err } + return parseCSV(finalOutput) +} - res, err := parseCSV(lastOutput) +func (h *HTTPCustom) GetRoleOps(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (OpsResult, error) { + role, err := h.GetRole(ctx, req, resp) if err != nil { - return GlobalInfo{}, err + return nil, err } - res.Event = OperationSuccess - h.Logger.Info("GetGlobalInfo get result", "result", res) - - return res, nil + opsRes := OpsResult{} + opsRes["role"] = role + return opsRes, nil } // callAction performs an HTTP request to local HTTP endpoint specified by actionSvcPort diff --git a/lorry/binding/custom/custom_test.go b/lorry/binding/custom/custom_test.go index 06441ae24ca..c9f88a496f7 100644 --- a/lorry/binding/custom/custom_test.go +++ b/lorry/binding/custom/custom_test.go @@ -21,6 +21,7 @@ package custom import ( "context" + "encoding/json" "fmt" "net/http" 
"net/http/httptest" @@ -31,6 +32,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/apecloud/kubeblocks/internal/common" viper "github.com/apecloud/kubeblocks/internal/viperx" "github.com/apecloud/kubeblocks/lorry/binding" "github.com/apecloud/kubeblocks/lorry/component" @@ -75,7 +77,7 @@ func TestInit(t *testing.T) { } } -func TestGlobalInfo(t *testing.T) { +func TestGlobalRoleSnapshot(t *testing.T) { var lines []string for i := 0; i < 3; i++ { podName := "pod-" + strconv.Itoa(i) @@ -100,7 +102,7 @@ func TestGlobalInfo(t *testing.T) { }{ "get": { input: join, - operation: "getGlobalInfo", + operation: "getRole", metadata: nil, path: "/", err: "", @@ -110,13 +112,14 @@ func TestGlobalInfo(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { response := binding.ProbeResponse{} - info, err := hs.GetGlobalInfo(context.TODO(), &binding.ProbeRequest{ + info, err := hs.GetRole(context.TODO(), &binding.ProbeRequest{ Operation: OperationKind(tc.operation), }, &response) require.NoError(t, err) - assert.Equal(t, OperationSuccess, info.Event) - assert.Equal(t, 3, len(info.PodName2Role)) - assert.Equal(t, 1, info.Term) + snapshot := &common.GlobalRoleSnapshot{} + assert.NoError(t, json.Unmarshal([]byte(info), snapshot)) + assert.Equal(t, 3, len(snapshot.PodRoleNamePairs)) + assert.Equal(t, "1", snapshot.Version) }) } diff --git a/lorry/binding/mysql/mysql.go b/lorry/binding/mysql/mysql.go index 51ee885c9d6..80c161e9c51 100644 --- a/lorry/binding/mysql/mysql.go +++ b/lorry/binding/mysql/mysql.go @@ -106,7 +106,6 @@ func (mysqlOps *MysqlOperations) Init(metadata component.Properties) error { mysqlOps.DBType = "mysql" // mysqlOps.InitIfNeed = mysqlOps.initIfNeed mysqlOps.BaseOperations.GetRole = mysqlOps.GetRole - mysqlOps.BaseOperations.GetGlobalInfo = mysqlOps.GetGlobalInfo mysqlOps.DBPort = config.GetDBPort() mysqlOps.RegisterOperationOnDBReady(GetRoleOperation, mysqlOps.GetRoleOps, manager) @@ 
-115,7 +114,6 @@ func (mysqlOps *MysqlOperations) Init(metadata component.Properties) error { mysqlOps.RegisterOperationOnDBReady(CheckStatusOperation, mysqlOps.CheckStatusOps, manager) mysqlOps.RegisterOperationOnDBReady(ExecOperation, mysqlOps.ExecOps, manager) mysqlOps.RegisterOperationOnDBReady(QueryOperation, mysqlOps.QueryOps, manager) - mysqlOps.RegisterOperationOnDBReady(GetGlobalInfoOperation, mysqlOps.GetGlobalInfoOps, manager) // following are ops for account management mysqlOps.RegisterOperationOnDBReady(ListUsersOp, mysqlOps.listUsersOps, manager) diff --git a/lorry/binding/types.go b/lorry/binding/types.go index 348e912bd3f..c888f1bab56 100644 --- a/lorry/binding/types.go +++ b/lorry/binding/types.go @@ -49,6 +49,8 @@ const ( roleEventReportFrequency = int(1 / roleEventRecordQPS) defaultFailedEventReportFrequency = 1800 defaultRoleDetectionThreshold = 300 + + rsmRoleUpdateMechanismVarName = "KB_RSM_ROLE_UPDATE_MECHANISM" ) const ( diff --git a/lorry/binding/utils.go b/lorry/binding/utils.go index 6e7dfcd01b1..ffad3301a8b 100644 --- a/lorry/binding/utils.go +++ b/lorry/binding/utils.go @@ -37,7 +37,9 @@ import ( "k8s.io/client-go/kubernetes/scheme" ctlruntime "sigs.k8s.io/controller-runtime" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" + viper "github.com/apecloud/kubeblocks/internal/viperx" "github.com/apecloud/kubeblocks/lorry/component" . 
"github.com/apecloud/kubeblocks/lorry/util" ) @@ -220,15 +222,26 @@ func String2RoleType(roleName string) RoleType { return CustomizedRole } -func SentProbeEvent(ctx context.Context, opsResult OpsResult, log logr.Logger) { +func SentProbeEvent(ctx context.Context, opsResult OpsResult, resp *ProbeResponse, log logr.Logger) { log.Info(fmt.Sprintf("send event: %v", opsResult)) - event, err := createProbeEvent(opsResult) - if err != nil { - log.Error(err, "generate event failed") - return + roleUpdateMechanism := workloads.NoneUpdate + if viper.IsSet(rsmRoleUpdateMechanismVarName) { + roleUpdateMechanism = workloads.RoleUpdateMechanism(viper.GetString(rsmRoleUpdateMechanismVarName)) } + switch roleUpdateMechanism { + case workloads.ReadinessProbeEventUpdate: + resp.Metadata[StatusCode] = OperationFailedHTTPCode + case workloads.DirectAPIServerEventUpdate: + event, err := createProbeEvent(opsResult) + if err != nil { + log.Error(err, "generate event failed") + return + } - _ = sendEvent(ctx, log, event) + _ = sendEvent(ctx, log, event) + default: + log.Info(fmt.Sprintf("no event sent, RoleUpdateMechanism: %s", roleUpdateMechanism)) + } } func createProbeEvent(opsResult OpsResult) (*corev1.Event, error) { diff --git a/lorry/middleware/http/probe/checks_middleware.go b/lorry/middleware/http/probe/checks_middleware.go index e5fce9b1373..614aa63031f 100644 --- a/lorry/middleware/http/probe/checks_middleware.go +++ b/lorry/middleware/http/probe/checks_middleware.go @@ -28,8 +28,7 @@ import ( "strings" "github.com/go-logr/logr" - "github.com/go-logr/zapr" - "go.uber.org/zap" + ctrl "sigs.k8s.io/controller-runtime" ) const ( @@ -48,14 +47,10 @@ type RequestMeta struct { Metadata map[string]string `json:"metadata"` } -var Logger logr.Logger +var logger logr.Logger func init() { - development, err := zap.NewProduction() - if err != nil { - panic(err) - } - Logger = zapr.NewLogger(development) + logger = ctrl.Log.WithName("middleware") } func GetRequestBody(operation string, args 
map[string][]string) []byte { @@ -69,7 +64,7 @@ func GetRequestBody(operation string, args map[string][]string) []byte { } else { marshal, err := json.Marshal(value) if err != nil { - Logger.Error(err, "getRequestBody marshal json error") + logger.Error(err, "getRequestBody marshal json error") return } metadata[key] = string(marshal) @@ -102,21 +97,21 @@ func SetMiddleware(next http.HandlerFunc) http.HandlerFunc { body := GetRequestBody(operation, uri.Query()) request.Body = io.NopCloser(bytes.NewReader(body)) } else { - Logger.Info("unknown probe operation", "operation", operation) + logger.Info("unknown probe operation", "operation", operation) } } - Logger.Info("receive request", "request", request.RequestURI) + logger.Info("receive request", "request", request.RequestURI) next(writer, request) code := writer.Header().Get(statusCodeHeader) statusCode, err := strconv.Atoi(code) if err == nil { // header has a statusCodeHeader writer.WriteHeader(statusCode) - Logger.Info("response abnormal") + logger.Info("write response with header", "statusCode", statusCode) } else { // header has no statusCodeHeader - Logger.Info("response has no statusCodeHeader") + logger.Info("response has no statusCodeHeader") } } } diff --git a/lorry/middleware/http/probe/router.go b/lorry/middleware/http/probe/router.go index d49a315e809..66bb03a44e8 100644 --- a/lorry/middleware/http/probe/router.go +++ b/lorry/middleware/http/probe/router.go @@ -101,7 +101,7 @@ func GetRouter() func(writer http.ResponseWriter, request *http.Request) { // get the character type character := GetCharacter(request.URL.Path) if character == "" { - Logger.Error(nil, "character type missing in path") + logger.Error(nil, "character type missing in path") return } @@ -109,14 +109,14 @@ func GetRouter() func(writer http.ResponseWriter, request *http.Request) { defer body.Close() buf, err := io.ReadAll(request.Body) if err != nil { - Logger.Error(err, "request body read failed") + logger.Error(err, "request body 
read failed") return } meta := &RequestMeta{Metadata: map[string]string{}} err = json.Unmarshal(buf, meta) if err != nil { - Logger.Error(err, "request body unmarshal failed") + logger.Error(err, "request body unmarshal failed") return } probeRequest := &ProbeRequest{Metadata: meta.Metadata} @@ -124,14 +124,15 @@ func GetRouter() func(writer http.ResponseWriter, request *http.Request) { // route the request to engine probeResp, err := route(character, request.Context(), probeRequest) + logger.Info("request routed", "request", probeRequest, "response", probeResp) if err != nil { - Logger.Error(err, "exec ops failed") + logger.Error(err, "exec ops failed") msg := fmt.Sprintf("exec ops failed: %v", err) writer.Header().Add(statusCodeHeader, OperationFailedHTTPCode) _, err := writer.Write([]byte(msg)) if err != nil { - Logger.Error(err, "ResponseWriter writes error when router") + logger.Error(err, "ResponseWriter writes error when router") } } else { code, ok := probeResp.Metadata[StatusCode] @@ -142,7 +143,7 @@ func GetRouter() func(writer http.ResponseWriter, request *http.Request) { writer.Header().Add(RespEndTimeKey, probeResp.Metadata[RespEndTimeKey]) _, err := writer.Write(probeResp.Data) if err != nil { - Logger.Error(err, "ResponseWriter writes error when router") + logger.Error(err, "ResponseWriter writes error when router") } } } @@ -163,7 +164,7 @@ func route(character string, ctx context.Context, request *ProbeRequest) (*Probe ops, ok := builtinMap[character] // if there is no builtin type, use the custom if !ok { - Logger.Info("No correspond builtin type, use the custom...") + logger.Info("No correspond builtin type, use the custom...") return customOp.Invoke(ctx, request) } return ops.Invoke(ctx, request) diff --git a/lorry/util/types.go b/lorry/util/types.go index 3a82c5c477f..ee6151cec2b 100644 --- a/lorry/util/types.go +++ b/lorry/util/types.go @@ -48,10 +48,9 @@ const ( QueryOperation OperationKind = "query" CloseOperation OperationKind = "close" - 
LockOperation OperationKind = "lockInstance" - UnlockOperation OperationKind = "unlockInstance" - VolumeProtection OperationKind = "volumeProtection" - GetGlobalInfoOperation OperationKind = "getGlobalInfo" + LockOperation OperationKind = "lockInstance" + UnlockOperation OperationKind = "unlockInstance" + VolumeProtection OperationKind = "volumeProtection" // actions for cluster accounts management ListUsersOp OperationKind = "listUsers" From 2dae595764971fe5a691b259e4c376481274b45b Mon Sep 17 00:00:00 2001 From: yijing Date: Wed, 20 Sep 2023 18:12:16 +0800 Subject: [PATCH 05/58] Update e2e-kbcli.yml --- .github/workflows/e2e-kbcli.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-kbcli.yml b/.github/workflows/e2e-kbcli.yml index 25ec535d1cd..06e4630dbb1 100644 --- a/.github/workflows/e2e-kbcli.yml +++ b/.github/workflows/e2e-kbcli.yml @@ -12,7 +12,7 @@ on: required: false default: '' TEST_TYPE: - description: 'test type (e.g. mysql|postgres|redis|mongo|kafka|pulsar|weaviate|qdrant|smarte|scale|greptimedb|nebula|risingwave|starrocks|oceanbase|foxlake)' + description: 'test type (e.g. 
mysql|postgres|redis|mongo|kafka|pulsar|weaviate|qdrant|smarte|scale|greptimedb|nebula|risingwave|starrocks|oceanbase|foxlake|oracle-mysql)' required: false default: '' CLOUD_PROVIDER: From 51c3704f9fd78732c108a5b2f3ac05f4393abe93 Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Wed, 20 Sep 2023 19:26:25 +0800 Subject: [PATCH 06/58] chore: change addon nvidia-gpu-exporter addonChartLocationBase (#5205) --- .../helm/templates/addons/nvidia-gpu-exporter-addon.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deploy/helm/templates/addons/nvidia-gpu-exporter-addon.yaml b/deploy/helm/templates/addons/nvidia-gpu-exporter-addon.yaml index 0c4a93dd735..0302c1b35fb 100644 --- a/deploy/helm/templates/addons/nvidia-gpu-exporter-addon.yaml +++ b/deploy/helm/templates/addons/nvidia-gpu-exporter-addon.yaml @@ -14,7 +14,14 @@ spec: type: Helm helm: - chartLocationURL: https://jihulab.com/api/v4/projects/85949/packages/helm/stable/charts/nvidia-gpu-exporter-0.3.1.tgz + {{- include "kubeblocks.addonChartLocationURL" ( dict "name" "nvidia-gpu-exporter" "version" "0.3.1" "values" .Values) | indent 4 }} + {{- include "kubeblocks.addonChartsImage" . 
| indent 4 }} + + installOptions: + {{- if hasPrefix "oci://" .Values.addonChartLocationBase }} + version: 0.3.1 + {{- end }} + installable: autoInstall: false From 90fad10161e11760e987c049448085795075132f Mon Sep 17 00:00:00 2001 From: zjx20 Date: Thu, 21 Sep 2023 08:18:06 +0800 Subject: [PATCH 07/58] feat: support PVC based backup repo (#5177) --- .../storage/v1alpha1/storageprovider_types.go | 11 +- ...torage.kubeblocks.io_storageproviders.yaml | 6 + .../dataprotection/backuprepo_controller.go | 240 ++++++++++++++---- .../backuprepo_controller_test.go | 114 ++++++++- controllers/dataprotection/type.go | 28 +- ...torage.kubeblocks.io_storageproviders.yaml | 6 + .../helm/templates/storageprovider/pvc.yaml | 32 +++ 7 files changed, 375 insertions(+), 62 deletions(-) create mode 100644 deploy/helm/templates/storageprovider/pvc.yaml diff --git a/apis/storage/v1alpha1/storageprovider_types.go b/apis/storage/v1alpha1/storageprovider_types.go index 1a5026fc3c6..d92721247ad 100644 --- a/apis/storage/v1alpha1/storageprovider_types.go +++ b/apis/storage/v1alpha1/storageprovider_types.go @@ -37,11 +37,19 @@ type StorageProviderSpec struct { // The template will be rendered with the following variables: // - Parameters: a map of parameters defined in the ParametersSchema. // - CSIDriverSecretRef: the reference of the secret created by the CSIDriverSecretTemplate. - // +kubebuilder:validation:Required + // +optional StorageClassTemplate string `json:"storageClassTemplate,omitempty"` + // A Go template for rendering a PersistentVolumeClaim. + // The template will be rendered with the following variables: + // - Parameters: a map of parameters defined in the ParametersSchema. + // - GeneratedStorageClassName: the name of the storage class generated with the StorageClassTemplate. 
+ // +optional + PersistentVolumeClaimTemplate string `json:"persistentVolumeClaimTemplate,omitempty"` + // The schema describes the parameters required by this StorageProvider, // when rendering the templates. + // +optional ParametersSchema *ParametersSchema `json:"parametersSchema,omitempty"` } @@ -52,6 +60,7 @@ type ParametersSchema struct { // +kubebuilder:validation:Type=object // +kubebuilder:pruning:PreserveUnknownFields // +k8s:conversion-gen=false + // +optional OpenAPIV3Schema *apiextensionsv1.JSONSchemaProps `json:"openAPIV3Schema,omitempty"` // credentialFields are the fields used to generate the secret. diff --git a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml index af55709f62e..4cdbf5e72d9 100644 --- a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml +++ b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml @@ -64,6 +64,12 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: object + persistentVolumeClaimTemplate: + description: 'A Go template for rendering a PersistentVolumeClaim. + The template will be rendered with the following variables: - Parameters: + a map of parameters defined in the ParametersSchema. - GeneratedStorageClassName: + the name of the storage class generated with the StorageClassTemplate.' + type: string storageClassTemplate: description: 'A Go template for rendering a storage class which will be used by the CSI driver. 
The template will be rendered with the diff --git a/controllers/dataprotection/backuprepo_controller.go b/controllers/dataprotection/backuprepo_controller.go index e56816e4706..a80971ccc39 100644 --- a/controllers/dataprotection/backuprepo_controller.go +++ b/controllers/dataprotection/backuprepo_controller.go @@ -121,8 +121,8 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) } r.providerRefMapper.setRef(repo, types.NamespacedName{Name: repo.Spec.StorageProviderRef}) - // check storage provider status - provider, err := r.checkStorageProviderStatus(reqCtx, repo) + // check storage provider + provider, err := r.checkStorageProvider(reqCtx, repo) if err != nil { _ = r.updateStatus(reqCtx, repo) return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "check storage provider status failed") @@ -136,14 +136,32 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) return intctrlutil.Reconciled() } + // check parameters for rendering templates + parameters, err := r.checkParameters(reqCtx, repo) + if err != nil { + _ = r.updateStatus(reqCtx, repo) + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "check parameters failed") + } + + renderCtx := renderContext{ + Parameters: parameters, + } + // create StorageClass and Secret for the CSI driver - err = r.createStorageClassAndSecret(reqCtx, repo, provider) + err = r.createStorageClassAndSecret(reqCtx, renderCtx, repo, provider) if err != nil { _ = r.updateStatus(reqCtx, repo) return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to create storage class and secret") } + err = r.checkPVCTemplate(reqCtx, renderCtx, repo, provider) + if err != nil { + _ = r.updateStatus(reqCtx, repo) + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, + "failed to check PVC template") + } + // TODO: implement pre-check logic // 1. try to create a PVC and observe its status // 2. 
create a pre-check job, mount with the PVC and check job status @@ -157,7 +175,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) // check associated backups, to create PVC in their namespaces if repo.Status.Phase == dpv1alpha1.BackupRepoReady { - if err = r.createPVCForAssociatedBackups(reqCtx, repo); err != nil { + if err = r.createPVCForAssociatedBackups(reqCtx, renderCtx, repo, provider); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "check associated backups failed") } @@ -172,7 +190,9 @@ func (r *BackupRepoReconciler) updateStatus(reqCtx intctrlutil.RequestCtx, repo if repo.Status.Phase != dpv1alpha1.BackupRepoDeleting { phase := dpv1alpha1.BackupRepoFailed if meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageProviderReady) && - meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageClassCreated) { + meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeParametersChecked) && + meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageClassCreated) && + meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypePVCTemplateChecked) { phase = dpv1alpha1.BackupRepoReady } repo.Status.Phase = phase @@ -187,17 +207,24 @@ func (r *BackupRepoReconciler) updateStatus(reqCtx intctrlutil.RequestCtx, repo return nil } -func (r *BackupRepoReconciler) checkStorageProviderStatus( - reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) (*storagev1alpha1.StorageProvider, error) { +func (r *BackupRepoReconciler) checkStorageProvider( + reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) (provider *storagev1alpha1.StorageProvider, err error) { var condType = ConditionTypeStorageProviderReady var status metav1.ConditionStatus var reason string var message string + // call updateCondition() when exiting the function. 
+ defer func() { + if status != "" { + err = updateCondition(reqCtx.Ctx, r.Client, repo, condType, status, reason, message) + } + }() + // get storage provider object providerKey := client.ObjectKey{Name: repo.Spec.StorageProviderRef} - provider := &storagev1alpha1.StorageProvider{} - err := r.Client.Get(reqCtx.Ctx, providerKey, provider) + provider = &storagev1alpha1.StorageProvider{} + err = r.Client.Get(reqCtx.Ctx, providerKey, provider) if err != nil { if apierrors.IsNotFound(err) { status = metav1.ConditionFalse @@ -207,10 +234,20 @@ func (r *BackupRepoReconciler) checkStorageProviderStatus( reason = ReasonUnknownError message = err.Error() } - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, status, reason, message) return nil, err } + // check its spec + if provider.Spec.StorageClassTemplate == "" && + provider.Spec.PersistentVolumeClaimTemplate == "" { + // both StorageClassTemplate and PersistentVolumeClaimTemplate are empty. + // in this case, we are unable to create a backup PVC. 
+ status = metav1.ConditionFalse + reason = ReasonInvalidStorageProvider + message = "both StorageClassTemplate and PersistentVolumeClaimTemplate are empty" + return provider, nil + } + // check its status if provider.Status.Phase == storagev1alpha1.StorageProviderReady { status = metav1.ConditionTrue @@ -221,26 +258,48 @@ func (r *BackupRepoReconciler) checkStorageProviderStatus( message = fmt.Sprintf("storage provider %s is not ready, status: %s", provider.Name, provider.Status.Phase) } - if updateErr := updateCondition(reqCtx.Ctx, r.Client, repo, condType, status, reason, message); updateErr != nil { - return nil, updateErr - } return provider, nil } -func (r *BackupRepoReconciler) createStorageClassAndSecret( - reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { +func (r *BackupRepoReconciler) checkParameters(reqCtx intctrlutil.RequestCtx, + repo *dpv1alpha1.BackupRepo) (parameters map[string]string, err error) { + condType := ConditionTypeParametersChecked + var status metav1.ConditionStatus + var reason string + var message string + + defer func() { + updateErr := updateCondition(reqCtx.Ctx, r.Client, repo, + condType, status, reason, message) + if err == nil { + err = updateErr + } + }() // collect parameters for rendering templates - parameters, err := r.collectParameters(reqCtx, repo) + parameters, err = r.collectParameters(reqCtx, repo) if err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, ConditionTypeStorageClassCreated, - metav1.ConditionUnknown, ReasonUnknownError, err.Error()) - return fmt.Errorf("failed to collect render parameters: %w", err) + if apierrors.IsNotFound(err) { + status = metav1.ConditionFalse + reason = ReasonCredentialSecretNotFound + message = err.Error() + return nil, err + } else { + status = metav1.ConditionUnknown + reason = ReasonUnknownError + message = err.Error() + } + return nil, err } // TODO: verify parameters - renderCtx := renderContext{ - 
Parameters: parameters, - } + status = metav1.ConditionTrue + reason = ReasonParametersChecked + return parameters, nil +} + +func (r *BackupRepoReconciler) createStorageClassAndSecret(reqCtx intctrlutil.RequestCtx, + renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { + oldRepo := repo.DeepCopy() // create secret for the CSI driver if it's not exist, @@ -259,12 +318,14 @@ func (r *BackupRepoReconciler) createStorageClassAndSecret( } } - // create storage class if it's not exist - if repo.Status.GeneratedStorageClassName == "" { - repo.Status.GeneratedStorageClassName = randomNameForDerivedObject(repo, "sc") - } - if _, err := r.createStorageClass(reqCtx, renderCtx, repo, provider); err != nil { - return err + if provider.Spec.StorageClassTemplate != "" { + // create storage class if it's not exist + if repo.Status.GeneratedStorageClassName == "" { + repo.Status.GeneratedStorageClassName = randomNameForDerivedObject(repo, "sc") + } + if _, err := r.createStorageClass(reqCtx, renderCtx, repo, provider); err != nil { + return err + } } // update other fields @@ -414,6 +475,46 @@ func (r *BackupRepoReconciler) createStorageClass( }) } +func (r *BackupRepoReconciler) checkPVCTemplate(reqCtx intctrlutil.RequestCtx, + renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { + + condType := ConditionTypePVCTemplateChecked + checkedTemplateMd5 := repo.Annotations[dataProtectionPVCTemplateMD5MD5AnnotationKey] + currentTemplateMd5 := md5Digest(provider.Spec.PersistentVolumeClaimTemplate) + if provider.Spec.PersistentVolumeClaimTemplate != "" && checkedTemplateMd5 != currentTemplateMd5 { + pvc := &corev1.PersistentVolumeClaim{} + err := r.constructPVCByTemplate(renderCtx, pvc, repo, provider.Spec.PersistentVolumeClaimTemplate) + if err != nil { + _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, + metav1.ConditionFalse, ReasonBadPVCTemplate, err.Error()) + return err + 
} + } + if err := updateCondition(reqCtx.Ctx, r.Client, repo, condType, + metav1.ConditionTrue, ReasonPVCTemplateChecked, ""); err != nil { + return err + } + return updateAnnotations(reqCtx.Ctx, r.Client, repo, map[string]string{ + dataProtectionPVCTemplateMD5MD5AnnotationKey: currentTemplateMd5, + }) +} + +func (r *BackupRepoReconciler) constructPVCByTemplate( + renderCtx renderContext, pvc *corev1.PersistentVolumeClaim, + repo *dpv1alpha1.BackupRepo, tmpl string) error { + // fill render values + renderCtx.GeneratedStorageClassName = repo.Status.GeneratedStorageClassName + + content, err := renderTemplate("pvc", tmpl, renderCtx) + if err != nil { + return fmt.Errorf("failed to render PVC template: %w", err) + } + if err = yaml.Unmarshal([]byte(content), pvc); err != nil { + return fmt.Errorf("failed to unmarshal PVC object: %w", err) + } + return nil +} + func (r *BackupRepoReconciler) listAssociatedBackups( reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo, extraSelector map[string]string) ([]*dpv1alpha1.Backup, error) { // list backups associated with the repo @@ -437,7 +538,9 @@ func (r *BackupRepoReconciler) listAssociatedBackups( } func (r *BackupRepoReconciler) createPVCForAssociatedBackups( - reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) error { + reqCtx intctrlutil.RequestCtx, renderCtx renderContext, + repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { + backups, err := r.listAssociatedBackups(reqCtx, repo, map[string]string{ dataProtectionNeedRepoPVCKey: trueVal, }) @@ -447,7 +550,7 @@ func (r *BackupRepoReconciler) createPVCForAssociatedBackups( // return any error to reconcile the repo var retErr error for _, backup := range backups { - if err := r.checkOrCreatePVC(reqCtx, repo, backup.Namespace); err != nil { + if err := r.checkOrCreatePVC(reqCtx, renderCtx, repo, provider, backup.Namespace); err != nil { reqCtx.Log.Error(err, "failed to check or create PVC", "namespace", backup.Namespace) retErr = 
err continue @@ -467,30 +570,47 @@ func (r *BackupRepoReconciler) createPVCForAssociatedBackups( } func (r *BackupRepoReconciler) checkOrCreatePVC( - reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo, namespace string) error { + reqCtx intctrlutil.RequestCtx, renderCtx renderContext, + repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider, namespace string) error { + pvc := &corev1.PersistentVolumeClaim{} - pvc.Name = repo.Status.BackupPVCName - pvc.Namespace = namespace _, err := createObjectIfNotExist(reqCtx.Ctx, r.Client, pvc, func() error { - storageClassName := repo.Status.GeneratedStorageClassName - volumeMode := corev1.PersistentVolumeFilesystem - resources := corev1.ResourceRequirements{} - if !repo.Spec.VolumeCapacity.IsZero() { - resources.Requests = corev1.ResourceList{ - corev1.ResourceStorage: repo.Spec.VolumeCapacity, + if provider.Spec.PersistentVolumeClaimTemplate != "" { + // construct the PVC object by rendering the template + err := r.constructPVCByTemplate(renderCtx, pvc, repo, provider.Spec.PersistentVolumeClaimTemplate) + if err != nil { + return err + } + } else { + // set storage class name to PVC, other fields will be set with default value later + storageClassName := repo.Status.GeneratedStorageClassName + pvc.Spec = corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, } } - pvc.Labels = map[string]string{ - dataProtectionBackupRepoKey: repo.Name, + // overwrite PVC name and namespace + pvc.Name = repo.Status.BackupPVCName + pvc.Namespace = namespace + // add a referencing label + if pvc.Labels == nil { + pvc.Labels = make(map[string]string) } - pvc.Spec = corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteMany, - }, - Resources: resources, - StorageClassName: &storageClassName, - VolumeMode: &volumeMode, + pvc.Labels[dataProtectionBackupRepoKey] = repo.Name + // set default values if not set + if len(pvc.Spec.AccessModes) == 0 { + 
pvc.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany} + } + if pvc.Spec.VolumeMode == nil { + volumeMode := corev1.PersistentVolumeFilesystem + pvc.Spec.VolumeMode = &volumeMode + } + if pvc.Spec.Resources.Requests == nil { + pvc.Spec.Resources.Requests = corev1.ResourceList{} + } + // note: pvc.Spec.Resources.Requests.Storage() never returns nil + if pvc.Spec.Resources.Requests.Storage().IsZero() { + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = repo.Spec.VolumeCapacity } if err := controllerutil.SetControllerReference(repo, pvc, r.Scheme); err != nil { return fmt.Errorf("failed to set owner reference: %w", err) @@ -722,8 +842,9 @@ func (r *BackupRepoReconciler) SetupWithManager(mgr ctrl.Manager) error { // ============================================================================ type renderContext struct { - Parameters map[string]string - CSIDriverSecretRef corev1.SecretReference + Parameters map[string]string + CSIDriverSecretRef corev1.SecretReference + GeneratedStorageClassName string } func renderTemplate(name, tpl string, rCtx renderContext) (string, error) { @@ -794,6 +915,25 @@ func updateCondition( return c.Status().Patch(ctx, repo, patch) } +func updateAnnotations(ctx context.Context, c client.Client, + repo *dpv1alpha1.BackupRepo, annotations map[string]string) error { + patch := client.MergeFrom(repo.DeepCopy()) + if repo.Annotations == nil { + repo.Annotations = make(map[string]string) + } + updated := false + for k, v := range annotations { + if curr, ok := repo.Annotations[k]; !ok || curr != v { + repo.Annotations[k] = v + updated = true + } + } + if !updated { + return nil + } + return c.Patch(ctx, repo, patch) +} + func md5Digest(s string) string { h := md5.New() h.Write([]byte(s)) diff --git a/controllers/dataprotection/backuprepo_controller_test.go b/controllers/dataprotection/backuprepo_controller_test.go index 804bf5743c0..ff1bc69175d 100644 --- a/controllers/dataprotection/backuprepo_controller_test.go 
+++ b/controllers/dataprotection/backuprepo_controller_test.go @@ -218,6 +218,13 @@ parameters: return testapps.CreateK8sResource(&testCtx, obj).(*dpv1alpha1.Backup) } + getBackupRepo := func(g Gomega, key types.NamespacedName) *dpv1alpha1.BackupRepo { + repo := &dpv1alpha1.BackupRepo{} + err := testCtx.Cli.Get(testCtx.Ctx, key, repo) + g.Expect(err).ShouldNot(HaveOccurred()) + return repo + } + deleteBackup := func(g Gomega, key types.NamespacedName) { backupObj := &dpv1alpha1.Backup{} err := testCtx.Cli.Get(testCtx.Ctx, key, backupObj) @@ -425,6 +432,10 @@ parameters: By("checking the repo object again, it should be failed") Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { g.Expect(repo.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoFailed)) + cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeParametersChecked) + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) + g.Expect(cond.Reason).Should(Equal(ReasonCredentialSecretNotFound)) })).Should(Succeed()) }) @@ -499,7 +510,7 @@ parameters: createBackupAndCheckPVC := func(namespace string) (backup *dpv1alpha1.Backup, pvcName string) { By("making sure the repo is ready") Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { - g.Expect(repo.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoReady)) + g.Expect(repo.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoReady), "%+v", repo) g.Expect(repo.Status.BackupPVCName).ShouldNot(BeEmpty()) pvcName = repo.Status.BackupPVCName })).Should(Succeed()) @@ -543,6 +554,107 @@ parameters: createBackupAndCheckPVC(namespace2) }) + Context("storage provider with PersistentVolumeClaimTemplate", func() { + It("should create a PVC in Backup's namespace (in default namespace)", func() { + By("setting the PersistentVolumeClaimTemplate") + Eventually(testapps.GetAndChangeObj(&testCtx, providerKey, func(provider 
*storagev1alpha1.StorageProvider) { + provider.Spec.PersistentVolumeClaimTemplate = ` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + byPVCTemplate: "true" +spec: + storageClassName: {{ .GeneratedStorageClassName }} + accessModes: + - ReadWriteOnce + resources: + volumeMode: Filesystem +` + })).Should(Succeed()) + _, pvcName := createBackupAndCheckPVC(testCtx.DefaultNamespace) + + Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{Name: pvcName, Namespace: testCtx.DefaultNamespace}, + func(g Gomega, pvc *corev1.PersistentVolumeClaim) { + repo := getBackupRepo(g, repoKey) + g.Expect(pvc.Spec.StorageClassName).ShouldNot(BeNil()) + g.Expect(*pvc.Spec.StorageClassName).Should(Equal(repo.Status.GeneratedStorageClassName)) + g.Expect(pvc.Spec.Resources.Requests.Storage()).ShouldNot(BeNil()) + g.Expect(pvc.Spec.Resources.Requests.Storage().String()).Should(Equal(repo.Spec.VolumeCapacity.String())) + g.Expect(pvc.Spec.AccessModes).Should(Equal([]corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce})) + g.Expect(pvc.Spec.VolumeMode).ShouldNot(BeNil()) + g.Expect(*pvc.Spec.VolumeMode).Should(BeEquivalentTo(corev1.PersistentVolumeFilesystem)) + g.Expect(pvc.Labels["byPVCTemplate"]).Should(Equal("true")) + })).Should(Succeed()) + }) + + It("should fail if the PVC template is invalid", func() { + By("setting a invalid PersistentVolumeClaimTemplate") + Eventually(testapps.GetAndChangeObj(&testCtx, providerKey, func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.PersistentVolumeClaimTemplate = `bad spec` + })).Should(Succeed()) + + By("checking repo's status") + Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { + g.Expect(repo.Status.Phase, dpv1alpha1.BackupRepoFailed) + cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypePVCTemplateChecked) + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) + 
g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadPVCTemplate)) + })).Should(Succeed()) + }) + }) + + Context("storage provider contains only PersistentVolumeClaimTemplate", func() { + BeforeEach(func() { + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.CSIDriverName = "" + provider.Spec.CSIDriverSecretTemplate = "" + provider.Spec.StorageClassTemplate = "" + provider.Spec.PersistentVolumeClaimTemplate = ` +spec: + storageClassName: some.storage.class + accessModes: + - ReadWriteOnce +` + }) + createBackupRepoSpec(nil) + }) + It("should create the PVC based on the PersistentVolumeClaimTemplate", func() { + _, pvcName := createBackupAndCheckPVC(namespace2) + Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{Name: pvcName, Namespace: namespace2}, + func(g Gomega, pvc *corev1.PersistentVolumeClaim) { + g.Expect(pvc.Spec.StorageClassName).ShouldNot(BeNil()) + g.Expect(*pvc.Spec.StorageClassName).Should(Equal("some.storage.class")) + g.Expect(pvc.Spec.AccessModes).Should(Equal([]corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce})) + g.Expect(pvc.Spec.VolumeMode).ShouldNot(BeNil()) + g.Expect(*pvc.Spec.VolumeMode).Should(BeEquivalentTo(corev1.PersistentVolumeFilesystem)) + g.Expect(pvc.Spec.Resources.Requests.Storage()).ShouldNot(BeNil()) + g.Expect(pvc.Spec.Resources.Requests.Storage().String()).Should(Equal(repo.Spec.VolumeCapacity.String())) + })).Should(Succeed()) + }) + }) + + It("should fail if both StorageClassTemplate and PersistentVolumeClaimTemplate are empty", func() { + By("creating a storage provider with empty PersistentVolumeClaimTemplate and StorageClassTemplate") + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.CSIDriverName = "" + provider.Spec.CSIDriverSecretTemplate = "" + provider.Spec.StorageClassTemplate = "" + provider.Spec.PersistentVolumeClaimTemplate = "" + }) + By("creating a backup repo with the storage provider") + 
createBackupRepoSpec(nil) + By("checking repo's status") + Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { + g.Expect(repo.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupRepoFailed)) + cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageProviderReady) + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonInvalidStorageProvider)) + })).Should(Succeed()) + }) + It("should block the deletion of the BackupRepo if derived objects are not deleted", func() { backup, pvcName := createBackupAndCheckPVC(namespace2) diff --git a/controllers/dataprotection/type.go b/controllers/dataprotection/type.go index f2e4a09cda8..fea863f5e6d 100644 --- a/controllers/dataprotection/type.go +++ b/controllers/dataprotection/type.go @@ -55,6 +55,7 @@ const ( // annotation keys dataProtectionSecretTemplateMD5AnnotationKey = "dataprotection.kubeblocks.io/secret-template-md5" dataProtectionTemplateValuesMD5AnnotationKey = "dataprotection.kubeblocks.io/template-values-md5" + dataProtectionPVCTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/pvc-template-md5" // the key of persistentVolumeTemplate in the configmap. 
persistentVolumeTemplateKey = "persistentVolume" @@ -66,20 +67,27 @@ const ( const ( // condition types ConditionTypeStorageProviderReady = "StorageProviderReady" + ConditionTypeParametersChecked = "ParametersChecked" ConditionTypeStorageClassCreated = "StorageClassCreated" + ConditionTypePVCTemplateChecked = "PVCTemplateChecked" ConditionTypeDerivedObjectsDeleted = "DerivedObjectsDeleted" // condition reasons - ReasonStorageProviderReady = "StorageProviderReady" - ReasonStorageProviderNotReady = "StorageProviderNotReady" - ReasonStorageProviderNotFound = "StorageProviderNotFound" - ReasonBadSecretTemplate = "BadSecretTemplate" - ReasonBadStorageClassTemplate = "BadStorageClassTemplate" - ReasonStorageClassCreated = "StorageClassCreated" - ReasonHaveAssociatedBackups = "HaveAssociatedBackups" - ReasonHaveResidualPVCs = "HaveResidualPVCs" - ReasonDerivedObjectsDeleted = "DerivedObjectsDeleted" - ReasonUnknownError = "UnknownError" + ReasonStorageProviderReady = "StorageProviderReady" + ReasonStorageProviderNotReady = "StorageProviderNotReady" + ReasonStorageProviderNotFound = "StorageProviderNotFound" + ReasonInvalidStorageProvider = "InvalidStorageProvider" + ReasonParametersChecked = "ParametersChecked" + ReasonCredentialSecretNotFound = "CredentialSecretNotFound" + ReasonBadSecretTemplate = "BadSecretTemplate" + ReasonBadStorageClassTemplate = "BadStorageClassTemplate" + ReasonBadPVCTemplate = "BadPVCTemplate" + ReasonStorageClassCreated = "StorageClassCreated" + ReasonPVCTemplateChecked = "PVCTemplateChecked" + ReasonHaveAssociatedBackups = "HaveAssociatedBackups" + ReasonHaveResidualPVCs = "HaveResidualPVCs" + ReasonDerivedObjectsDeleted = "DerivedObjectsDeleted" + ReasonUnknownError = "UnknownError" ) const manifestsUpdaterContainerName = "manifests-updater" diff --git a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml index af55709f62e..4cdbf5e72d9 100644 --- 
a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml +++ b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml @@ -64,6 +64,12 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: object + persistentVolumeClaimTemplate: + description: 'A Go template for rendering a PersistentVolumeClaim. + The template will be rendered with the following variables: - Parameters: + a map of parameters defined in the ParametersSchema. - GeneratedStorageClassName: + the name of the storage class generated with the StorageClassTemplate.' + type: string storageClassTemplate: description: 'A Go template for rendering a storage class which will be used by the CSI driver. The template will be rendered with the diff --git a/deploy/helm/templates/storageprovider/pvc.yaml b/deploy/helm/templates/storageprovider/pvc.yaml new file mode 100644 index 00000000000..582f26697b1 --- /dev/null +++ b/deploy/helm/templates/storageprovider/pvc.yaml @@ -0,0 +1,32 @@ +apiVersion: storage.kubeblocks.io/v1alpha1 +kind: StorageProvider +metadata: + name: pvc + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} +spec: + persistentVolumeClaimTemplate: | + spec: + {{- $scName := (include "kubeblocks.defaultStorageClass" .) 
}} + storageClassName: {{ printf `{{ .Parameters.storageClassName | default %q }}` $scName }} + accessModes: + - {{ `{{ .Parameters.accessMode | default "ReadWriteOnce" }}` }} + volumeMode: {{ `{{ .Parameters.volumeMode | default "Filesystem" }}` }} + + parametersSchema: + openAPIV3Schema: + type: "object" + properties: + storageClassName: + type: string + description: "the name of the StorageClass used to create the PVC" + accessMode: + type: string + description: "the access mode used to create the PVC" + default: "ReadWriteOnce" + enum: ["ReadWriteOnce", "ReadWriteMany", "ReadWriteOncePod"] + volumeMode: + type: string + description: "the volume mode used to create the PVC" + default: "Filesystem" + enum: ["Filesystem", "Block"] \ No newline at end of file From 9476090994a63264ba672242af6a80a8c9ac961e Mon Sep 17 00:00:00 2001 From: linghan-hub <56351212+linghan-hub@users.noreply.github.com> Date: Thu, 21 Sep 2023 14:37:55 +0800 Subject: [PATCH 08/58] chore: add e2e test cases (#5192) --- Makefile | 24 ++++- .../smoketest/foxlake/06_hscale_up.yaml | 4 +- .../oceanbase/00_oceanbasecluster.yaml | 95 +++++++++++++++++++ .../smoketest/oceanbase/01_vscale.yaml | 12 +++ .../testdata/smoketest/oceanbase/02_stop.yaml | 10 ++ .../smoketest/oceanbase/03_start.yaml | 10 ++ .../smoketest/oceanbase/04_vexpand.yaml | 16 ++++ .../smoketest/oceanbase/05_restart.yaml | 10 ++ .../smoketest/oceanbase/06_hscale_up.yaml | 10 ++ .../smoketest/oceanbase/07_hscale_down.yaml | 10 ++ .../00_official_pgcluster.yaml | 91 ++++++++++++++++++ .../official-postgresql/01_vscale.yaml | 12 +++ .../official-postgresql/02_stop.yaml | 10 ++ .../official-postgresql/03_start.yaml | 10 ++ .../official-postgresql/04_vexpand.yaml | 12 +++ .../official-postgresql/05_restart.yaml | 10 ++ .../official-postgresql/06_hscale_up.yaml | 10 ++ .../official-postgresql/07_hscale_down.yaml | 10 ++ .../openldap/00_openldapcluster.yaml | 67 +++++++++++++ .../smoketest/openldap/01_vscale.yaml | 12 +++ 
.../testdata/smoketest/openldap/02_stop.yaml | 10 ++ .../testdata/smoketest/openldap/03_start.yaml | 10 ++ .../smoketest/openldap/04_restart.yaml | 10 ++ .../smoketest/openldap/05_hscale_up.yaml | 10 ++ .../smoketest/openldap/06_hscale_down.yaml | 10 ++ .../orioledb/00_orioledbcluster.yaml | 93 ++++++++++++++++++ .../smoketest/orioledb/01_vscale.yaml | 12 +++ .../testdata/smoketest/orioledb/02_stop.yaml | 10 ++ .../testdata/smoketest/orioledb/03_start.yaml | 10 ++ .../smoketest/orioledb/04_vexpand.yaml | 12 +++ .../smoketest/orioledb/05_restart.yaml | 10 ++ .../smoketest/orioledb/06_hscale_up.yaml | 10 ++ .../smoketest/orioledb/07_hscale_down.yaml | 10 ++ 33 files changed, 659 insertions(+), 3 deletions(-) create mode 100644 test/e2e/testdata/smoketest/oceanbase/00_oceanbasecluster.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/01_vscale.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/02_stop.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/03_start.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/04_vexpand.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/05_restart.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/06_hscale_up.yaml create mode 100644 test/e2e/testdata/smoketest/oceanbase/07_hscale_down.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/00_official_pgcluster.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/01_vscale.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/02_stop.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/03_start.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/04_vexpand.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/05_restart.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/06_hscale_up.yaml create mode 100644 test/e2e/testdata/smoketest/official-postgresql/07_hscale_down.yaml create mode 
100644 test/e2e/testdata/smoketest/openldap/00_openldapcluster.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/01_vscale.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/02_stop.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/03_start.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/04_restart.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/05_hscale_up.yaml create mode 100644 test/e2e/testdata/smoketest/openldap/06_hscale_down.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/00_orioledbcluster.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/01_vscale.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/02_stop.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/03_start.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/04_vexpand.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/05_restart.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/06_hscale_up.yaml create mode 100644 test/e2e/testdata/smoketest/orioledb/07_hscale_down.yaml diff --git a/Makefile b/Makefile index a67ec7d53b6..51a549ffed7 100644 --- a/Makefile +++ b/Makefile @@ -646,8 +646,24 @@ else ifeq ($(TEST_TYPE), kafka) $(HELM) template kafka-cluster deploy/kafka-cluster > test/e2e/testdata/smoketest/kafka/00_kafkacluster.yaml else ifeq ($(TEST_TYPE), foxlake) $(HELM) dependency build deploy/foxlake-cluster --skip-refresh - $(HELM) upgrade --install etcd deploy/foxlake + $(HELM) upgrade --install foxlake deploy/foxlake $(HELM) template foxlake-cluster deploy/foxlake-cluster > test/e2e/testdata/smoketest/foxlake/00_foxlakecluster.yaml +else ifeq ($(TEST_TYPE), oceanbase) + $(HELM) dependency build deploy/oceanbase-cluster --skip-refresh + $(HELM) upgrade --install oceanbase deploy/oceanbase + $(HELM) template oceanbase-cluster deploy/oceanbase-cluster > test/e2e/testdata/smoketest/oceanbase/00_oceanbasecluster.yaml +else ifeq ($(TEST_TYPE), official-postgresql) + 
$(HELM) dependency build deploy/official-postgresql-cluster --skip-refresh + $(HELM) upgrade --install official-postgresql deploy/official-postgresql + $(HELM) template official-pg deploy/official-postgresql-cluster > test/e2e/testdata/smoketest/official-postgresql/00_official_pgcluster.yaml +else ifeq ($(TEST_TYPE), openldap) + $(HELM) dependency build deploy/openldap-cluster --skip-refresh + $(HELM) upgrade --install openldap deploy/openldap + $(HELM) template openldap-cluster deploy/openldap-cluster > test/e2e/testdata/smoketest/openldap/00_openldapcluster.yaml +else ifeq ($(TEST_TYPE), orioledb) + $(HELM) dependency build deploy/orioledb-cluster --skip-refresh + $(HELM) upgrade --install orioledb deploy/orioledb + $(HELM) template oriole-cluster deploy/orioledb-cluster > test/e2e/testdata/smoketest/orioledb/00_orioledbcluster.yaml else $(error "test type does not exist") endif @@ -685,6 +701,12 @@ else ifeq ($(TEST_TYPE), kafka) $(HELM) upgrade --install kafka deploy/kafka else ifeq ($(TEST_TYPE), foxlake) $(HELM) upgrade --install foxlake deploy/foxlake +else ifeq ($(TEST_TYPE), oceanbase) + $(HELM) upgrade --install oceanbase deploy/oceanbase +else ifeq ($(TEST_TYPE), official-postgresql) + $(HELM) upgrade --install official-postgresql deploy/official-postgresql +else ifeq ($(TEST_TYPE), openldap) + $(HELM) upgrade --install openldap deploy/openldap else $(error "test type does not exist") endif diff --git a/test/e2e/testdata/smoketest/foxlake/06_hscale_up.yaml b/test/e2e/testdata/smoketest/foxlake/06_hscale_up.yaml index d4c8ef1a6b7..7b359fd8bf1 100644 --- a/test/e2e/testdata/smoketest/foxlake/06_hscale_up.yaml +++ b/test/e2e/testdata/smoketest/foxlake/06_hscale_up.yaml @@ -7,6 +7,6 @@ spec: type: HorizontalScaling horizontalScaling: - componentName: foxlake-server - replicas: 3 + replicas: 2 - componentName: foxlake-metadb - replicas: 3 \ No newline at end of file + replicas: 2 \ No newline at end of file diff --git 
a/test/e2e/testdata/smoketest/oceanbase/00_oceanbasecluster.yaml b/test/e2e/testdata/smoketest/oceanbase/00_oceanbasecluster.yaml new file mode 100644 index 00000000000..69715495e7f --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/00_oceanbasecluster.yaml @@ -0,0 +1,95 @@ +--- +# Source: oceanbase-cluster/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: obcluster-observer-sa + namespace: "default" + labels: + helm.sh/chart: oceanbase-cluster-0.0.1-alpha1 + app.kubernetes.io/name: oceanbase-cluster + app.kubernetes.io/instance: oceanbase-cluster + app.kubernetes.io/version: "4.2.0.0-100010032023083021" + app.kubernetes.io/managed-by: Helm +--- +# Source: oceanbase-cluster/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: obcluster-statefulset-reader + namespace: "default" + labels: + helm.sh/chart: oceanbase-cluster-0.0.1-alpha1 + app.kubernetes.io/name: oceanbase-cluster + app.kubernetes.io/instance: oceanbase-cluster + app.kubernetes.io/version: "4.2.0.0-100010032023083021" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: ["apps"] # "" indicates the core API group + resources: ["statefulsets"] + verbs: ["get", "watch", "list"] +--- +# Source: oceanbase-cluster/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: obcluster-read-statefulsets + namespace: "default" + labels: + helm.sh/chart: oceanbase-cluster-0.0.1-alpha1 + app.kubernetes.io/name: oceanbase-cluster + app.kubernetes.io/instance: oceanbase-cluster + app.kubernetes.io/version: "4.2.0.0-100010032023083021" + app.kubernetes.io/managed-by: Helm +subjects: +- kind: ServiceAccount + name: obcluster-observer-sa +- kind: ServiceAccount + name: kb-obcluster +roleRef: + kind: Role + name: obcluster-statefulset-reader + apiGroup: rbac.authorization.k8s.io +--- +# Source: oceanbase-cluster/templates/cluster.yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: 
Cluster +metadata: + name: oceanbase-cluster + labels: + helm.sh/chart: oceanbase-cluster-0.0.1-alpha1 + app.kubernetes.io/name: oceanbase-cluster + app.kubernetes.io/instance: oceanbase-cluster + app.kubernetes.io/version: "4.2.0.0-100010032023083021" + app.kubernetes.io/managed-by: Helm +spec: + clusterDefinitionRef: oceanbase + clusterVersionRef: oceanbase-4.2.0.0-100010032023083021 + terminationPolicy: Delete + componentSpecs: + - name: ob-bundle + componentDefRef: ob-bundle + serviceAccountName: obcluster-observer-sa + replicas: 3 + volumeClaimTemplates: + - name: data-file + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "50Gi" + - name: data-log + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "50Gi" + - name: log + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "20Gi" diff --git a/test/e2e/testdata/smoketest/oceanbase/01_vscale.yaml b/test/e2e/testdata/smoketest/oceanbase/01_vscale.yaml new file mode 100644 index 00000000000..a7ed2425d20 --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/01_vscale.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-vscale- +spec: + clusterRef: oceanbase-cluster + type: VerticalScaling + verticalScaling: + - componentName: ob-bundle + requests: + cpu: "1.5" + memory: 1Gi \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/oceanbase/02_stop.yaml b/test/e2e/testdata/smoketest/oceanbase/02_stop.yaml new file mode 100644 index 00000000000..9d750d7cfe9 --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/02_stop.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-stop- +spec: + clusterRef: oceanbase-cluster + ttlSecondsAfterSucceed: 27017 + type: Stop + restart: + - componentName: ob-bundle \ No newline at end of file diff --git 
a/test/e2e/testdata/smoketest/oceanbase/03_start.yaml b/test/e2e/testdata/smoketest/oceanbase/03_start.yaml new file mode 100644 index 00000000000..49c1b14ae07 --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/03_start.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-start- +spec: + clusterRef: oceanbase-cluster + ttlSecondsAfterSucceed: 27017 + type: Start + restart: + - componentName: ob-bundle \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/oceanbase/04_vexpand.yaml b/test/e2e/testdata/smoketest/oceanbase/04_vexpand.yaml new file mode 100644 index 00000000000..b0af0ba1a5c --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/04_vexpand.yaml @@ -0,0 +1,16 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-vexpand- +spec: + clusterRef: oceanbase-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: ob-bundle + volumeClaimTemplates: + - name: data-file + storage: "51Gi" + - name: data-log + storage: "51Gi" + - name: log + storage: "51Gi" \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/oceanbase/05_restart.yaml b/test/e2e/testdata/smoketest/oceanbase/05_restart.yaml new file mode 100644 index 00000000000..08d3bc9dfaf --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/05_restart.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-restart- +spec: + clusterRef: oceanbase-cluster + ttlSecondsAfterSucceed: 27017 + type: Restart + restart: + - componentName: ob-bundle \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/oceanbase/06_hscale_up.yaml b/test/e2e/testdata/smoketest/oceanbase/06_hscale_up.yaml new file mode 100644 index 00000000000..c4f14b45fe9 --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/06_hscale_up.yaml @@ -0,0 +1,10 @@ +apiVersion: 
apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-hscale-up- +spec: + clusterRef: oceanbase-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: ob-bundle + replicas: 4 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/oceanbase/07_hscale_down.yaml b/test/e2e/testdata/smoketest/oceanbase/07_hscale_down.yaml new file mode 100644 index 00000000000..ad50ee949d7 --- /dev/null +++ b/test/e2e/testdata/smoketest/oceanbase/07_hscale_down.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oceanbase-cluster-hscale-down- +spec: + clusterRef: oceanbase-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: ob-bundle + replicas: 2 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/00_official_pgcluster.yaml b/test/e2e/testdata/smoketest/official-postgresql/00_official_pgcluster.yaml new file mode 100644 index 00000000000..c55ece3d8fd --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/00_official_pgcluster.yaml @@ -0,0 +1,91 @@ +--- +# Source: official-postgresql-cluster/templates/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kb-official-pg + namespace: default + labels: + helm.sh/chart: official-postgresql-cluster-0.7.0-alpha.0 + app.kubernetes.io/version: "14.7" + app.kubernetes.io/instance: official-pg +--- +# Source: official-postgresql-cluster/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kb-official-pg + labels: + helm.sh/chart: official-postgresql-cluster-0.7.0-alpha.0 + app.kubernetes.io/version: "14.7" + app.kubernetes.io/instance: official-pg +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeblocks-volume-protection-pod-role +subjects: + - kind: ServiceAccount + name: kb-official-pg + namespace: default +--- +# Source: 
official-postgresql-cluster/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kb-official-pg + labels: + helm.sh/chart: official-postgresql-cluster-0.7.0-alpha.0 + app.kubernetes.io/version: "14.7" + app.kubernetes.io/instance: official-pg +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeblocks-cluster-pod-role +subjects: + - kind: ServiceAccount + name: kb-official-pg + namespace: default +--- +# Source: official-postgresql-cluster/templates/cluster.yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: official-pg + namespace: default + labels: + helm.sh/chart: official-postgresql-cluster-0.7.0-alpha.0 + app.kubernetes.io/version: "14.7" + app.kubernetes.io/instance: official-pg +spec: + clusterVersionRef: official-postgresql-14.7 + terminationPolicy: Delete + affinity: + podAntiAffinity: Preferred + topologyKeys: + - kubernetes.io/hostname + tenancy: SharedNode + clusterDefinitionRef: official-postgresql + componentSpecs: + - name: postgresql + componentDefRef: postgresql + monitor: false + replicas: 1 + serviceAccountName: kb-official-pg + switchPolicy: + type: Noop + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data # ref clusterDefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + services: diff --git a/test/e2e/testdata/smoketest/official-postgresql/01_vscale.yaml b/test/e2e/testdata/smoketest/official-postgresql/01_vscale.yaml new file mode 100644 index 00000000000..dcc21936420 --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/01_vscale.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-vscale- +spec: + clusterRef: official-pg + type: VerticalScaling + verticalScaling: + - componentName: postgresql + 
requests: + cpu: "1" + memory: 1Gi \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/02_stop.yaml b/test/e2e/testdata/smoketest/official-postgresql/02_stop.yaml new file mode 100644 index 00000000000..e875d772f0d --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/02_stop.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-stop- +spec: + clusterRef: official-pg + ttlSecondsAfterSucceed: 27017 + type: Stop + restart: + - componentName: postgresql \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/03_start.yaml b/test/e2e/testdata/smoketest/official-postgresql/03_start.yaml new file mode 100644 index 00000000000..79f0aa585ea --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/03_start.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-start- +spec: + clusterRef: official-pg + ttlSecondsAfterSucceed: 27017 + type: Start + restart: + - componentName: postgresql \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/04_vexpand.yaml b/test/e2e/testdata/smoketest/official-postgresql/04_vexpand.yaml new file mode 100644 index 00000000000..3da723e84ac --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/04_vexpand.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-vexpand- +spec: + clusterRef: official-pg + type: VolumeExpansion + volumeExpansion: + - componentName: postgresql + volumeClaimTemplates: + - name: data + storage: "21Gi" \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/05_restart.yaml b/test/e2e/testdata/smoketest/official-postgresql/05_restart.yaml new file mode 100644 index 00000000000..4bb8b62cf11 --- /dev/null +++ 
b/test/e2e/testdata/smoketest/official-postgresql/05_restart.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-restart- +spec: + clusterRef: official-pg + ttlSecondsAfterSucceed: 27017 + type: Restart + restart: + - componentName: postgresql \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/06_hscale_up.yaml b/test/e2e/testdata/smoketest/official-postgresql/06_hscale_up.yaml new file mode 100644 index 00000000000..ea7b6a98d22 --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/06_hscale_up.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-hscale-up- +spec: + clusterRef: official-pg + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + replicas: 3 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/official-postgresql/07_hscale_down.yaml b/test/e2e/testdata/smoketest/official-postgresql/07_hscale_down.yaml new file mode 100644 index 00000000000..53d15ba74a9 --- /dev/null +++ b/test/e2e/testdata/smoketest/official-postgresql/07_hscale_down.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: official-pg-hscale-down- +spec: + clusterRef: official-pg + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + replicas: 2 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/00_openldapcluster.yaml b/test/e2e/testdata/smoketest/openldap/00_openldapcluster.yaml new file mode 100644 index 00000000000..6c9e7a2b25d --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/00_openldapcluster.yaml @@ -0,0 +1,67 @@ +--- +# Source: openldap-cluster/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: openldap-cluster + labels: + helm.sh/chart: openldap-cluster-0.1.0-alpha.0 + app.kubernetes.io/name: openldap-cluster 
+ app.kubernetes.io/instance: openldap-cluster + app.kubernetes.io/version: "2.4.57" + app.kubernetes.io/managed-by: Helm +--- +# Source: openldap-cluster/templates/cluster.yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: openldap-cluster + labels: + helm.sh/chart: openldap-cluster-0.1.0-alpha.0 + app.kubernetes.io/name: openldap-cluster + app.kubernetes.io/instance: openldap-cluster + app.kubernetes.io/version: "2.4.57" + app.kubernetes.io/managed-by: Helm +spec: + clusterDefinitionRef: openldap + clusterVersionRef: openldap-2.4.57 + terminationPolicy: Halt + affinity: + topologyKeys: + - kubernetes.io/hostname + componentSpecs: + - name: openldap + componentDefRef: openldap-compdef + replicas: 1 + serviceAccountName: openldap-cluster +--- +# Source: openldap-cluster/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "openldap-cluster-test-connection" + labels: + helm.sh/chart: openldap-cluster-0.1.0-alpha.0 + app.kubernetes.io/name: openldap-cluster + app.kubernetes.io/instance: openldap-cluster + app.kubernetes.io/version: "2.4.57" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + containers: + - name: test-openldap-cluster + image: "docker.io/osixia/openldap:1.5.0" + command: + - "ldapsearch" + args: + - "-x" + - "-H" + - "ldap://openldap-cluster-openldap-0.openldap-cluster-openldap-headless.default.svc.cluster.local" + - "-b" + - "dc=kubeblocks,dc=io" + - "-D" + - "cn=admin,dc=kubeblocks,dc=io" + - "-w" + - "admin" + restartPolicy: Never diff --git a/test/e2e/testdata/smoketest/openldap/01_vscale.yaml b/test/e2e/testdata/smoketest/openldap/01_vscale.yaml new file mode 100644 index 00000000000..38917694ad7 --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/01_vscale.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: openldap-cluster-vscale- +spec: + clusterRef: openldap-cluster + type: VerticalScaling + 
verticalScaling: + - componentName: openldap + requests: + cpu: "1" + memory: 1Gi \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/02_stop.yaml b/test/e2e/testdata/smoketest/openldap/02_stop.yaml new file mode 100644 index 00000000000..bfb3e822a46 --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/02_stop.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: openldap-cluster-stop- +spec: + clusterRef: openldap-cluster + ttlSecondsAfterSucceed: 27017 + type: Stop + restart: + - componentName: openldap \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/03_start.yaml b/test/e2e/testdata/smoketest/openldap/03_start.yaml new file mode 100644 index 00000000000..f364fbbb7c6 --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/03_start.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: openldap-cluster-start- +spec: + clusterRef: openldap-cluster + ttlSecondsAfterSucceed: 27017 + type: Start + restart: + - componentName: openldap \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/04_restart.yaml b/test/e2e/testdata/smoketest/openldap/04_restart.yaml new file mode 100644 index 00000000000..5299247a7fc --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/04_restart.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: openldap-cluster-restart- +spec: + clusterRef: openldap-cluster + ttlSecondsAfterSucceed: 27017 + type: Restart + restart: + - componentName: openldap \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/05_hscale_up.yaml b/test/e2e/testdata/smoketest/openldap/05_hscale_up.yaml new file mode 100644 index 00000000000..56d637c2993 --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/05_hscale_up.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest 
+metadata: + generateName: openldap-cluster-hscale-up- +spec: + clusterRef: openldap-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: openldap + replicas: 3 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/openldap/06_hscale_down.yaml b/test/e2e/testdata/smoketest/openldap/06_hscale_down.yaml new file mode 100644 index 00000000000..87082b7829b --- /dev/null +++ b/test/e2e/testdata/smoketest/openldap/06_hscale_down.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: openldap-cluster-hscale-down- +spec: + clusterRef: openldap-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: openldap + replicas: 2 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/00_orioledbcluster.yaml b/test/e2e/testdata/smoketest/orioledb/00_orioledbcluster.yaml new file mode 100644 index 00000000000..eed85e5b1d8 --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/00_orioledbcluster.yaml @@ -0,0 +1,93 @@ +--- +# Source: orioledb-cluster/templates/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kb-oriole-cluster + namespace: default + labels: + helm.sh/chart: orioledb-cluster-0.6.0-beta.44 + app.kubernetes.io/version: "14.7.2-beta1" + app.kubernetes.io/instance: oriole-cluster +--- +# Source: orioledb-cluster/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kb-oriole-cluster + labels: + helm.sh/chart: orioledb-cluster-0.6.0-beta.44 + app.kubernetes.io/version: "14.7.2-beta1" + app.kubernetes.io/instance: oriole-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeblocks-volume-protection-pod-role +subjects: + - kind: ServiceAccount + name: kb-oriole-cluster + namespace: default +--- +# Source: orioledb-cluster/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kb-oriole-cluster + 
labels: + helm.sh/chart: orioledb-cluster-0.6.0-beta.44 + app.kubernetes.io/version: "14.7.2-beta1" + app.kubernetes.io/instance: oriole-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeblocks-cluster-pod-role +subjects: + - kind: ServiceAccount + name: kb-oriole-cluster + namespace: default +--- +# Source: orioledb-cluster/templates/cluster.yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: oriole-cluster + namespace: default + labels: + helm.sh/chart: orioledb-cluster-0.6.0-beta.44 + app.kubernetes.io/version: "14.7.2-beta1" + app.kubernetes.io/instance: oriole-cluster +spec: + clusterVersionRef: orioledb-beta1 + terminationPolicy: Delete + affinity: + podAntiAffinity: Preferred + topologyKeys: + - kubernetes.io/hostname + tenancy: SharedNode + clusterDefinitionRef: orioledb + componentSpecs: + - name: orioledb + componentDefRef: orioledb + monitor: false + replicas: 1 + enabledLogs: + - running + serviceAccountName: kb-oriole-cluster + switchPolicy: + type: Noop + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data # ref clusterDefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + services: diff --git a/test/e2e/testdata/smoketest/orioledb/01_vscale.yaml b/test/e2e/testdata/smoketest/orioledb/01_vscale.yaml new file mode 100644 index 00000000000..1acc234a06e --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/01_vscale.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oriole-cluster-vscale- +spec: + clusterRef: oriole-cluster + type: VerticalScaling + verticalScaling: + - componentName: orioledb + requests: + cpu: "1" + memory: 1Gi \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/02_stop.yaml b/test/e2e/testdata/smoketest/orioledb/02_stop.yaml 
new file mode 100644 index 00000000000..71c863d73d2 --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/02_stop.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oriole-cluster-stop- +spec: + clusterRef: oriole-cluster + ttlSecondsAfterSucceed: 27017 + type: Stop + restart: + - componentName: orioledb \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/03_start.yaml b/test/e2e/testdata/smoketest/orioledb/03_start.yaml new file mode 100644 index 00000000000..e0c3f8a79a8 --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/03_start.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oriole-cluster-start- +spec: + clusterRef: oriole-cluster + ttlSecondsAfterSucceed: 27017 + type: Start + restart: + - componentName: orioledb \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/04_vexpand.yaml b/test/e2e/testdata/smoketest/orioledb/04_vexpand.yaml new file mode 100644 index 00000000000..52ccab7c83d --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/04_vexpand.yaml @@ -0,0 +1,12 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oriole-cluster-vexpand- +spec: + clusterRef: oriole-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: orioledb + volumeClaimTemplates: + - name: data + storage: "21Gi" \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/05_restart.yaml b/test/e2e/testdata/smoketest/orioledb/05_restart.yaml new file mode 100644 index 00000000000..fe12e90fb48 --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/05_restart.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: oriole-cluster-restart- +spec: + clusterRef: oriole-cluster + ttlSecondsAfterSucceed: 27017 + type: Restart + restart: + - componentName: orioledb \ No newline at end of file 
diff --git a/test/e2e/testdata/smoketest/orioledb/06_hscale_up.yaml b/test/e2e/testdata/smoketest/orioledb/06_hscale_up.yaml new file mode 100644 index 00000000000..52972bca93b --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/06_hscale_up.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: orioledb-cluster-hscale-up- +spec: + clusterRef: orioledb-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: orioledb + replicas: 3 \ No newline at end of file diff --git a/test/e2e/testdata/smoketest/orioledb/07_hscale_down.yaml b/test/e2e/testdata/smoketest/orioledb/07_hscale_down.yaml new file mode 100644 index 00000000000..5e74ad073de --- /dev/null +++ b/test/e2e/testdata/smoketest/orioledb/07_hscale_down.yaml @@ -0,0 +1,10 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + generateName: orioledb-cluster-hscale-down- +spec: + clusterRef: orioledb-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: orioledb + replicas: 2 \ No newline at end of file From 197095a0438b561c583cb3f162ae9aec81ab080c Mon Sep 17 00:00:00 2001 From: kubeJocker <102039539+kubeJocker@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:28:02 +0800 Subject: [PATCH 09/58] fix: expose error info correctly when preflight (#5218) --- internal/preflight/analyzer/kb_storage_class.go | 11 ++++++++--- internal/preflight/analyzer/kb_taint.go | 9 +++++++-- internal/preflight/collect.go | 7 +------ 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/internal/preflight/analyzer/kb_storage_class.go b/internal/preflight/analyzer/kb_storage_class.go index 0f63d74e355..784cf4ee81d 100644 --- a/internal/preflight/analyzer/kb_storage_class.go +++ b/internal/preflight/analyzer/kb_storage_class.go @@ -33,7 +33,7 @@ import ( const ( StorageClassPath = "cluster-resources/storage-classes.json" - StorageClassErrorPath = "cluster-resources/storage-classes-error.json" + 
StorageClassErrorPath = "cluster-resources/storage-classes-errors.json" ) type AnalyzeStorageClassByKb struct { @@ -68,8 +68,13 @@ func (a *AnalyzeStorageClassByKb) analyzeStorageClass(analyzer *preflightv1beta2 } storageClassesErrorData, err := getFile(StorageClassErrorPath) - if err != nil && storageClassesErrorData != nil && len(storageClassesErrorData) > 0 && len(storageClassesData) == 0 { - return newWarnResultWithMessage(a.Title(), fmt.Sprintf("get nodes list from k8s failed, err:%v", err)), err + if err == nil && storageClassesErrorData != nil && len(storageClassesErrorData) > 0 && len(storageClassesData) == 0 { + var values []string + err = json.Unmarshal(storageClassesErrorData, &values) + if err != nil || len(values) == 0 { + return newWarnResultWithMessage(a.Title(), fmt.Sprintf("get storage class failed, err:%v", storageClassesErrorData)), err + } + return newWarnResultWithMessage(a.Title(), fmt.Sprintf("get storage class failed, err:%v", values[0])), nil } var storageClasses storagev1beta1.StorageClassList diff --git a/internal/preflight/analyzer/kb_taint.go b/internal/preflight/analyzer/kb_taint.go index 6989aa37f7f..6dddd47b1fd 100644 --- a/internal/preflight/analyzer/kb_taint.go +++ b/internal/preflight/analyzer/kb_taint.go @@ -38,7 +38,7 @@ import ( const ( NodesPath = "cluster-resources/nodes.json" - NodesErrorPath = "cluster-resources/nodes-error.json" + NodesErrorPath = "cluster-resources/nodes-errors.json" Tolerations = "tolerations" KubeBlocks = "kubeblocks" ) @@ -77,7 +77,12 @@ func (a *AnalyzeTaintClassByKb) analyzeTaint(getFile GetCollectedFileContents, f nodesErrorData, err := getFile(NodesErrorPath) if err != nil && nodesErrorData != nil && len(nodesErrorData) > 0 && len(nodesData) == 0 { - return newFailedResultWithMessage(a.Title(), fmt.Sprintf("get nodes list from k8s failed, err:%v", err)), err + var values []string + err = json.Unmarshal(nodesErrorData, &values) + if err != nil || len(values) == 0 { + return 
newFailedResultWithMessage(a.Title(), fmt.Sprintf("get nodes from k8s failed, err:%v", nodesErrorData)), err + } + return newFailedResultWithMessage(a.Title(), fmt.Sprintf("get nodes from k8s failed, err:%v", values[0])), nil } var nodes v1.NodeList diff --git a/internal/preflight/collect.go b/internal/preflight/collect.go index 06dcaa11dd9..d41faa31330 100644 --- a/internal/preflight/collect.go +++ b/internal/preflight/collect.go @@ -24,7 +24,6 @@ import ( "encoding/json" "fmt" "reflect" - "strings" "time" "github.com/pkg/errors" @@ -328,7 +327,7 @@ func handleStorageClassError(ctx context.Context, _ preflight.CollectOpts, clien return } var errorStrs []string - if err := json.Unmarshal(storageClassError, &errorStrs); err != nil || len(errorStrs) == 0 || !isMetricsUnavailableError(errorStrs[0]) { + if err := json.Unmarshal(storageClassError, &errorStrs); err != nil || len(errorStrs) == 0 { return } storageClasses, err := client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) @@ -354,10 +353,6 @@ func handleStorageClassError(ctx context.Context, _ preflight.CollectOpts, clien data[StorageClassPath] = scBytes } -func isMetricsUnavailableError(str string) bool { - return strings.Contains(str, "the server is currently unable to handle the request") -} - func CollectRemoteData(ctx context.Context, preflightSpec *preflightv1beta2.HostPreflight, f cmdutil.Factory, progressCh chan interface{}) (*preflight.CollectResult, error) { v := viper.GetViper() From e78f3fc1b18b95b03739e49dcf8ca56accf8b8f8 Mon Sep 17 00:00:00 2001 From: huyongqii <129354195+huyongqii@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:37:02 +0800 Subject: [PATCH 10/58] fix: fix kbcli login (#5212) --- .../callback_html/complete.html | 0 .../callback_html/error.html | 0 .../authenticator/callback_listener.go | 20 +++++++++++++------ .../cli/cmd/auth/authorize/cached_provider.go | 2 +- .../cli/cmd/auth/authorize/token_provider.go | 1 - internal/cli/cmd/cluster/register.go | 2 +- 6 files 
changed, 16 insertions(+), 9 deletions(-) rename internal/cli/cmd/auth/authorize/{ => authenticator}/callback_html/complete.html (100%) rename internal/cli/cmd/auth/authorize/{ => authenticator}/callback_html/error.html (100%) diff --git a/internal/cli/cmd/auth/authorize/callback_html/complete.html b/internal/cli/cmd/auth/authorize/authenticator/callback_html/complete.html similarity index 100% rename from internal/cli/cmd/auth/authorize/callback_html/complete.html rename to internal/cli/cmd/auth/authorize/authenticator/callback_html/complete.html diff --git a/internal/cli/cmd/auth/authorize/callback_html/error.html b/internal/cli/cmd/auth/authorize/authenticator/callback_html/error.html similarity index 100% rename from internal/cli/cmd/auth/authorize/callback_html/error.html rename to internal/cli/cmd/auth/authorize/authenticator/callback_html/error.html diff --git a/internal/cli/cmd/auth/authorize/authenticator/callback_listener.go b/internal/cli/cmd/auth/authorize/authenticator/callback_listener.go index 23e8908cc05..672d678b02f 100644 --- a/internal/cli/cmd/auth/authorize/authenticator/callback_listener.go +++ b/internal/cli/cmd/auth/authorize/authenticator/callback_listener.go @@ -21,6 +21,7 @@ package authenticator import ( "context" + "embed" "errors" "fmt" "log" @@ -28,11 +29,17 @@ import ( "os" "os/signal" "strings" + + "github.com/leaanthony/debme" +) + +var ( + //go:embed callback_html/* + callbackHTML embed.FS ) const ( ListenerAddress = "127.0.0.1" - DIR = "./internal/cli/cmd/auth/authorize/callback_html/" ) type HTTPServer interface { @@ -114,18 +121,19 @@ func (c *CallbackService) awaitResponse(callbackResponse chan CallbackResponse, }) } -func writeHTML(w http.ResponseWriter, file string) { - htmlContent, err := os.ReadFile(DIR + file) +func writeHTML(w http.ResponseWriter, fileName string) { + tmplFs, _ := debme.FS(callbackHTML, "callback_html") + tmlBytes, err := tmplFs.ReadFile(fileName) if err != nil { http.Error(w, "Failed to read HTML file", 
http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/html") - write(w, string(htmlContent)) + write(w, tmlBytes) } -func write(w http.ResponseWriter, msg string) { - _, err := w.Write([]byte(msg)) +func write(w http.ResponseWriter, msg []byte) { + _, err := w.Write(msg) if err != nil { fmt.Println("Error writing response:", err) } diff --git a/internal/cli/cmd/auth/authorize/cached_provider.go b/internal/cli/cmd/auth/authorize/cached_provider.go index e24ddca6052..f870d4d641a 100644 --- a/internal/cli/cmd/auth/authorize/cached_provider.go +++ b/internal/cli/cmd/auth/authorize/cached_provider.go @@ -38,7 +38,7 @@ const ( tokenFile = "token.json" keyringKey = "token" - keyringService = "kueblocks" + keyringService = "kubeblocks" keyringLabel = "KUBEBLOCKS CLI" fileMode = 0o600 diff --git a/internal/cli/cmd/auth/authorize/token_provider.go b/internal/cli/cmd/auth/authorize/token_provider.go index f802c3de870..150545d7b9c 100644 --- a/internal/cli/cmd/auth/authorize/token_provider.go +++ b/internal/cli/cmd/auth/authorize/token_provider.go @@ -88,7 +88,6 @@ func (p *TokenProvider) Login(ctx context.Context) (*authenticator.UserInfoRespo if err != nil { return nil, "", errors.Wrap(err, "could not cache tokens") } - fmt.Println(tokenResult.IDToken) return userInfo, tokenResult.IDToken, nil } diff --git a/internal/cli/cmd/cluster/register.go b/internal/cli/cmd/cluster/register.go index 3ae1bbbf69b..1bf152013fd 100644 --- a/internal/cli/cmd/cluster/register.go +++ b/internal/cli/cmd/cluster/register.go @@ -28,7 +28,6 @@ import ( "regexp" "time" - "github.com/apecloud/kubeblocks/internal/cli/util/helm" "github.com/asaskevich/govalidator" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" @@ -36,6 +35,7 @@ import ( "k8s.io/kubectl/pkg/util/templates" "github.com/apecloud/kubeblocks/internal/cli/cluster" + "github.com/apecloud/kubeblocks/internal/cli/util/helm" "github.com/apecloud/kubeblocks/internal/cli/util/prompt" ) From 
8309d8d48245b86a22ee080d90445ac6123a6768 Mon Sep 17 00:00:00 2001 From: free6om Date: Thu, 21 Sep 2023 15:42:51 +0800 Subject: [PATCH 11/58] chore: code cleanup (#5203) 1. delete components(consensus_set, replication_set, stateful_set, stateless_set) and related types and utils 2. delete duplicated role changed event handler 3. delete deployment cue file 4. delete useless `enqueue_ancestor` event handler 5. build component member status by observing `rsm.status.membersStatus` to avoid updating `cluster.status` in event_controller 6. delete rsm `if/else` branch, make rsm permanently in ut 7. merge base.go, rsm_component.go, rsm_workload.go into component.go Co-authored-by: Leon --- apis/apps/v1alpha1/cluster_types.go | 7 + apis/apps/v1alpha1/zz_generated.deepcopy.go | 5 + .../bases/apps.kubeblocks.io_clusters.yaml | 42 + controllers/apps/cluster_controller_test.go | 486 ++---- .../apps/cluster_status_event_handler_test.go | 22 +- controllers/apps/components/base.go | 652 ------- controllers/apps/components/base_stateful.go | 999 ----------- .../apps/components/base_stateful_legacy.go | 752 -------- controllers/apps/components/component.go | 1512 ++++++++++++++++- controllers/apps/components/component_set.go | 71 - ...ilder.go => component_workload_builder.go} | 156 +- controllers/apps/components/consensus.go | 107 -- controllers/apps/components/consensus_set.go | 270 --- .../apps/components/consensus_set_test.go | 168 -- .../apps/components/consensus_set_utils.go | 425 ----- .../components/consensus_set_utils_test.go | 208 --- .../apps/components/consensus_workload.go | 53 - ...l_hscale.go => hscale_volume_populator.go} | 0 controllers/apps/components/plan.go | 82 - controllers/apps/components/plan_test.go | 67 - controllers/apps/components/replication.go | 107 -- .../apps/components/replication_set.go | 277 --- .../apps/components/replication_set_test.go | 282 --- .../apps/components/replication_set_utils.go | 214 --- .../components/replication_set_utils_test.go | 
278 --- .../apps/components/replication_workload.go | 50 - controllers/apps/components/rsm.go | 107 -- controllers/apps/components/rsm_set.go | 337 ---- controllers/apps/components/rsm_set_test.go | 212 --- controllers/apps/components/rsm_workload.go | 46 - controllers/apps/components/stateful.go | 105 -- controllers/apps/components/stateful_set.go | 274 --- .../apps/components/stateful_set_test.go | 181 -- .../apps/components/stateful_workload.go | 30 - controllers/apps/components/stateless.go | 304 ---- controllers/apps/components/stateless_set.go | 196 --- .../apps/components/stateless_set_test.go | 173 -- .../apps/components/stateless_workload.go | 43 - controllers/apps/components/status.go | 63 - controllers/apps/components/status_test.go | 520 ------ controllers/apps/components/types.go | 108 +- controllers/apps/components/utils.go | 103 -- controllers/apps/components/utils_test.go | 115 +- .../configuration/parallel_upgrade_policy.go | 10 +- controllers/apps/configuration/policy_util.go | 38 +- .../apps/configuration/policy_util_test.go | 24 +- .../configuration/rolling_upgrade_policy.go | 15 +- .../apps/configuration/simple_policy.go | 10 +- .../apps/configuration/sync_upgrade_policy.go | 8 +- controllers/apps/configuration/types.go | 22 +- .../apps/operations/ops_progress_util.go | 23 +- .../apps/operations/switchover_util.go | 5 +- .../apps/opsrequest_controller_test.go | 117 +- controllers/apps/tls_utils_test.go | 13 +- .../apps/transformer_cluster_deletion.go | 9 +- controllers/k8score/const.go | 11 - .../k8score/role_change_event_handler.go | 139 -- .../crds/apps.kubeblocks.io_clusters.yaml | 42 + .../common}/stateful_set_utils.go | 96 +- .../common}/stateful_set_utils_test.go | 71 +- .../builder/builder_container_test.go | 4 +- internal/controller/factory/builder.go | 157 -- internal/controller/factory/builder_test.go | 23 - .../factory/cue/deployment_template.cue | 84 - internal/controller/plan/prepare_test.go | 60 +- 
internal/controller/rsm/enqueue_ancestor.go | 333 ---- .../controller/rsm/enqueue_ancestor_test.go | 399 ----- .../controller/rsm/pod_role_event_handler.go | 3 - internal/controller/rsm/update_plan.go | 4 +- internal/controller/rsm/utils.go | 18 +- internal/controller/rsm/utils_test.go | 8 +- internal/controllerutil/pod_utils.go | 15 + internal/generics/type.go | 2 +- lorry/client/client.go | 2 +- test/integration/controller_suite_test.go | 4 +- test/integration/mysql_ha_test.go | 4 +- test/integration/mysql_reconfigure_test.go | 4 +- test/integration/redis_hscale_test.go | 4 +- 78 files changed, 1953 insertions(+), 10037 deletions(-) delete mode 100644 controllers/apps/components/base.go delete mode 100644 controllers/apps/components/base_stateful.go delete mode 100644 controllers/apps/components/base_stateful_legacy.go delete mode 100644 controllers/apps/components/component_set.go rename controllers/apps/components/{workload_builder.go => component_workload_builder.go} (64%) delete mode 100644 controllers/apps/components/consensus.go delete mode 100644 controllers/apps/components/consensus_set.go delete mode 100644 controllers/apps/components/consensus_set_test.go delete mode 100644 controllers/apps/components/consensus_set_utils.go delete mode 100644 controllers/apps/components/consensus_set_utils_test.go delete mode 100644 controllers/apps/components/consensus_workload.go rename controllers/apps/components/{base_stateful_hscale.go => hscale_volume_populator.go} (100%) delete mode 100644 controllers/apps/components/plan.go delete mode 100644 controllers/apps/components/plan_test.go delete mode 100644 controllers/apps/components/replication.go delete mode 100644 controllers/apps/components/replication_set.go delete mode 100644 controllers/apps/components/replication_set_test.go delete mode 100644 controllers/apps/components/replication_set_utils.go delete mode 100644 controllers/apps/components/replication_set_utils_test.go delete mode 100644 
controllers/apps/components/replication_workload.go delete mode 100644 controllers/apps/components/rsm.go delete mode 100644 controllers/apps/components/rsm_set.go delete mode 100644 controllers/apps/components/rsm_set_test.go delete mode 100644 controllers/apps/components/rsm_workload.go delete mode 100644 controllers/apps/components/stateful.go delete mode 100644 controllers/apps/components/stateful_set.go delete mode 100644 controllers/apps/components/stateful_set_test.go delete mode 100644 controllers/apps/components/stateful_workload.go delete mode 100644 controllers/apps/components/stateless.go delete mode 100644 controllers/apps/components/stateless_set.go delete mode 100644 controllers/apps/components/stateless_set_test.go delete mode 100644 controllers/apps/components/stateless_workload.go delete mode 100644 controllers/apps/components/status.go delete mode 100644 controllers/apps/components/status_test.go delete mode 100644 controllers/k8score/role_change_event_handler.go rename {controllers/apps/components => internal/common}/stateful_set_utils.go (57%) rename {controllers/apps/components => internal/common}/stateful_set_utils_test.go (50%) delete mode 100644 internal/controller/factory/cue/deployment_template.cue delete mode 100644 internal/controller/rsm/enqueue_ancestor.go delete mode 100644 internal/controller/rsm/enqueue_ancestor_test.go diff --git a/apis/apps/v1alpha1/cluster_types.go b/apis/apps/v1alpha1/cluster_types.go index ccf65cc8507..39a1f728440 100644 --- a/apis/apps/v1alpha1/cluster_types.go +++ b/apis/apps/v1alpha1/cluster_types.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -349,11 +350,17 @@ type ClusterComponentStatus struct { // consensusSetStatus 
specifies the mapping of role and pod name. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." ConsensusSetStatus *ConsensusSetStatus `json:"consensusSetStatus,omitempty"` // replicationSetStatus specifies the mapping of role and pod name. // +optional + //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." ReplicationSetStatus *ReplicationSetStatus `json:"replicationSetStatus,omitempty"` + + // members' status. + // +optional + MembersStatus []workloads.MemberStatus `json:"membersStatus,omitempty"` } type ConsensusSetStatus struct { diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index e3a08ddd5ae..780fd1369b0 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -642,6 +642,11 @@ func (in *ClusterComponentStatus) DeepCopyInto(out *ClusterComponentStatus) { *out = new(ReplicationSetStatus) (*in).DeepCopyInto(*out) } + if in.MembersStatus != nil { + in, out := &in.MembersStatus, &out.MembersStatus + *out = make([]workloadsv1alpha1.MemberStatus, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterComponentStatus. diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index 18c2abf550a..3815b20abf1 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -873,6 +873,48 @@ spec: required: - leader type: object + membersStatus: + description: members' status. + items: + properties: + podName: + default: Unknown + description: PodName pod name. + type: string + role: + properties: + accessMode: + default: ReadWrite + description: AccessMode, what service this member + capable. 
+ enum: + - None + - Readonly + - ReadWrite + type: string + canVote: + default: true + description: CanVote, whether this member has voting + rights + type: boolean + isLeader: + default: false + description: IsLeader, whether this member is the + leader + type: boolean + name: + default: leader + description: Name, role name. + type: string + required: + - accessMode + - name + type: object + required: + - podName + - role + type: object + type: array message: additionalProperties: type: string diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 7da5453691d..70617c21118 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -54,6 +54,7 @@ import ( dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps/components" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/generics" @@ -448,36 +449,11 @@ var _ = Describe("Cluster Controller", func() { } checkSingleWorkload := func(compDefName string, expects func(g Gomega, sts *appsv1.StatefulSet, deploy *appsv1.Deployment)) { - if intctrlutil.IsRSMEnabled() { - Eventually(func(g Gomega) { - l := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - sts := components.ConvertRSMToSTS(&l.Items[0]) - expects(g, sts, nil) - }).Should(Succeed()) - return - } - - isStsWorkload := true - switch compDefName { - case statelessCompDefName: - isStsWorkload = false - case statefulCompDefName, replicationCompDefName, consensusCompDefName: - break - default: - panic("unreachable") - } - - if isStsWorkload { - Eventually(func(g Gomega) { - l := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - expects(g, &l.Items[0], nil) - 
}).Should(Succeed()) - } else { - Eventually(func(g Gomega) { - l := testk8s.ListAndCheckDeployment(&testCtx, clusterKey) - expects(g, nil, &l.Items[0]) - }).Should(Succeed()) - } + Eventually(func(g Gomega) { + l := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + sts := components.ConvertRSMToSTS(&l.Items[0]) + expects(g, sts, nil) + }).Should(Succeed()) } testChangeReplicas := func(compName, compDefName string) { @@ -579,15 +555,9 @@ var _ = Describe("Cluster Controller", func() { By("Mocking component PVCs to bound") mockComponentPVCsBound(comp, int(comp.Replicas), true) - if intctrlutil.IsRSMEnabled() { - By("Checking rsm replicas right") - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) - Expect(int(*rsmList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas)) - } else { - By("Checking sts replicas right") - stsList := testk8s.ListAndCheckStatefulSetWithComponent(&testCtx, clusterKey, comp.Name) - Expect(int(*stsList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas)) - } + By("Checking rsm replicas right") + rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) + Expect(int(*rsmList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas)) By("Creating mock pods in StatefulSet") pods := mockPodsForTest(clusterObj, int(comp.Replicas)) @@ -611,12 +581,8 @@ var _ = Describe("Cluster Controller", func() { checkUpdatedStsReplicas := func() { By("Checking updated sts replicas") Eventually(func() int32 { - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) - return *rsmList.Items[0].Spec.Replicas - } - stsList := testk8s.ListAndCheckStatefulSetWithComponent(&testCtx, clusterKey, comp.Name) - return *stsList.Items[0].Spec.Replicas + rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) + return *rsmList.Items[0].Spec.Replicas }).Should(BeEquivalentTo(updatedReplicas)) } @@ -743,38 +709,6 @@ var _ = 
Describe("Cluster Controller", func() { })).Should(Succeed()) } } - - if !intctrlutil.IsRSMEnabled() { - By("Checking pod env config updated") - cmKey := types.NamespacedName{ - Namespace: clusterKey.Namespace, - Name: fmt.Sprintf("%s-%s-env", clusterKey.Name, comp.Name), - } - Eventually(testapps.CheckObj(&testCtx, cmKey, func(g Gomega, cm *corev1.ConfigMap) { - match := func(key, prefix, suffix string) bool { - return strings.HasPrefix(key, prefix) && strings.HasSuffix(key, suffix) - } - foundN := "" - for k, v := range cm.Data { - if match(k, constant.KBPrefix, "_N") { - foundN = v - break - } - } - g.Expect(foundN).Should(Equal(strconv.Itoa(updatedReplicas))) - for i := 0; i < updatedReplicas; i++ { - foundPodHostname := "" - suffix := fmt.Sprintf("_%d_HOSTNAME", i) - for k, v := range cm.Data { - if match(k, constant.KBPrefix, suffix) { - foundPodHostname = v - break - } - } - g.Expect(foundPodHostname != "").Should(BeTrue()) - } - })).Should(Succeed()) - } } scaleInCheck := func() { @@ -1009,18 +943,12 @@ var _ = Describe("Cluster Controller", func() { Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(1)) By("Checking the replicas") - var sts *appsv1.StatefulSet - var rsm *workloads.ReplicatedStateMachine - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - rsm = &rsmList.Items[0] - sts = testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterObj.Name, compName). - SetReplicas(*rsm.Spec.Replicas). - Create(&testCtx).GetObject() - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - sts = &stsList.Items[0] - } + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + rsm := &rsmList.Items[0] + sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterObj.Name, compName). + SetReplicas(*rsm.Spec.Replicas). 
+ Create(&testCtx).GetObject() + Expect(*sts.Spec.Replicas).Should(BeEquivalentTo(replicas)) By("Mock PVCs in Bound Status") @@ -1057,11 +985,9 @@ var _ = Describe("Cluster Controller", func() { case statefulCompDefName, consensusCompDefName: mockPods = testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) } - if intctrlutil.IsRSMEnabled() { - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) - })).ShouldNot(HaveOccurred()) - } + Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { + testk8s.MockRSMReady(rsm, mockPods...) + })).ShouldNot(HaveOccurred()) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) @@ -1170,14 +1096,9 @@ var _ = Describe("Cluster Controller", func() { Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(1)) By("Checking the replicas") - var numbers int32 - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - numbers = *rsmList.Items[0].Spec.Replicas - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - numbers = *stsList.Items[0].Spec.Replicas - } + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + numbers := *rsmList.Items[0].Spec.Replicas + Expect(numbers).Should(BeEquivalentTo(replicas)) By("Mock PVCs in Bound Status") @@ -1241,13 +1162,8 @@ var _ = Describe("Cluster Controller", func() { Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(2)) By("Checking PVCs are resized") - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - numbers = *rsmList.Items[0].Spec.Replicas - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - numbers = *stsList.Items[0].Spec.Replicas - } + rsmList = testk8s.ListAndCheckRSM(&testCtx, clusterKey) + numbers = *rsmList.Items[0].Spec.Replicas for i := 
numbers - 1; i >= 0; i-- { pvc := &corev1.PersistentVolumeClaim{} pvcKey := types.NamespacedName{ @@ -1270,13 +1186,8 @@ var _ = Describe("Cluster Controller", func() { By("Checking PVCs are resized") Eventually(func(g Gomega) { - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - numbers = *rsmList.Items[0].Spec.Replicas - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - numbers = *stsList.Items[0].Spec.Replicas - } + rsmList = testk8s.ListAndCheckRSM(&testCtx, clusterKey) + numbers = *rsmList.Items[0].Spec.Replicas for i := numbers - 1; i >= 0; i-- { pvc := &corev1.PersistentVolumeClaim{} pvcKey := types.NamespacedName{ @@ -1541,7 +1452,7 @@ var _ = Describe("Cluster Controller", func() { } getStsPodsName := func(sts *appsv1.StatefulSet) []string { - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) + pods, err := common.GetPodListByStatefulSet(ctx, k8sClient, sts) Expect(err).To(Succeed()) names := make([]string, 0) @@ -1566,25 +1477,16 @@ var _ = Describe("Cluster Controller", func() { By("Waiting for the cluster controller to create resources completely") waitForCreatingResourceCompletely(clusterKey, compName) - var sts *appsv1.StatefulSet var rsm *workloads.ReplicatedStateMachine - if intctrlutil.IsRSMEnabled() { - Eventually(func(g Gomega) { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - g.Expect(rsmList.Items).ShouldNot(BeEmpty()) - rsm = &rsmList.Items[0] - }).Should(Succeed()) - sts = testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). - AddAppComponentLabel(rsm.Labels[constant.KBAppComponentLabelKey]). - AddAppInstanceLabel(rsm.Labels[constant.AppInstanceLabelKey]). 
- SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() - } else { - Eventually(func(g Gomega) { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - g.Expect(stsList.Items).ShouldNot(BeEmpty()) - sts = &stsList.Items[0] - }).Should(Succeed()) - } + Eventually(func(g Gomega) { + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + g.Expect(rsmList.Items).ShouldNot(BeEmpty()) + rsm = &rsmList.Items[0] + }).Should(Succeed()) + sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). + AddAppComponentLabel(rsm.Labels[constant.KBAppComponentLabelKey]). + AddAppInstanceLabel(rsm.Labels[constant.AppInstanceLabelKey]). + SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() By("Creating mock pods in StatefulSet, and set controller reference") pods := mockPodsForTest(clusterObj, replicas) @@ -1609,7 +1511,7 @@ var _ = Describe("Cluster Controller", func() { By("Checking pods' role are changed accordingly") Eventually(func(g Gomega) { - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) + pods, err := common.GetPodListByStatefulSet(ctx, k8sClient, sts) g.Expect(err).ShouldNot(HaveOccurred()) // should have 3 pods g.Expect(pods).Should(HaveLen(3)) @@ -1628,15 +1530,13 @@ var _ = Describe("Cluster Controller", func() { g.Expect(followerCount).Should(Equal(2)) }).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - // trigger rsm to reconcile as the underlying sts is not created - Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(sts), func(rsm *workloads.ReplicatedStateMachine) { - rsm.Annotations = map[string]string{"time": time.Now().Format(time.RFC3339)} - })()).Should(Succeed()) - } + // trigger rsm to reconcile as the underlying sts is not created + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(sts), func(rsm *workloads.ReplicatedStateMachine) { + rsm.Annotations = map[string]string{"time": time.Now().Format(time.RFC3339)} + 
})()).Should(Succeed()) By("Checking pods' annotations") Eventually(func(g Gomega) { - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) + pods, err := common.GetPodListByStatefulSet(ctx, k8sClient, sts) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(pods).Should(HaveLen(int(*sts.Spec.Replicas))) for _, pod := range pods { @@ -1644,19 +1544,18 @@ var _ = Describe("Cluster Controller", func() { g.Expect(pod.Annotations[constant.ComponentReplicasAnnotationKey]).Should(Equal(strconv.Itoa(int(*sts.Spec.Replicas)))) } }).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - rsmPatch := client.MergeFrom(rsm.DeepCopy()) - By("Updating RSM's status") - rsm.Status.UpdateRevision = "mock-version" - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) - Expect(err).Should(BeNil()) - var podList []*corev1.Pod - for i := range pods { - podList = append(podList, &pods[i]) - } - testk8s.MockRSMReady(rsm, podList...) - Expect(k8sClient.Status().Patch(ctx, rsm, rsmPatch)).Should(Succeed()) - } + rsmPatch := client.MergeFrom(rsm.DeepCopy()) + By("Updating RSM's status") + rsm.Status.UpdateRevision = "mock-version" + pods, err := common.GetPodListByStatefulSet(ctx, k8sClient, sts) + Expect(err).Should(BeNil()) + var podList []*corev1.Pod + for i := range pods { + podList = append(podList, &pods[i]) + } + testk8s.MockRSMReady(rsm, podList...) 
+ Expect(k8sClient.Status().Patch(ctx, rsm, rsmPatch)).Should(Succeed()) + stsPatch := client.MergeFrom(sts.DeepCopy()) By("Updating StatefulSet's status") sts.Status.UpdateRevision = "mock-version" @@ -1923,96 +1822,6 @@ var _ = Describe("Cluster Controller", func() { factory.SetReplicas(3) }, true) - By("Check deployment workload has been created") - Eventually(testapps.List(&testCtx, generics.DeploymentSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - }, client.InNamespace(clusterKey.Namespace))).ShouldNot(HaveLen(0)) - - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - - By("Check statefulset pod's volumes") - for _, sts := range stsList.Items { - podSpec := sts.Spec.Template - volumeNames := map[string]struct{}{} - for _, v := range podSpec.Spec.Volumes { - volumeNames[v.Name] = struct{}{} - } - - for _, cc := range [][]corev1.Container{ - podSpec.Spec.Containers, - podSpec.Spec.InitContainers, - } { - for _, c := range cc { - for _, vm := range c.VolumeMounts { - _, ok := volumeNames[vm.Name] - Expect(ok).Should(BeTrue()) - } - } - } - } - - By("Check associated PDB has been created") - Eventually(testapps.List(&testCtx, generics.PodDisruptionBudgetSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - }, client.InNamespace(clusterKey.Namespace))).ShouldNot(BeEmpty()) - - podSpec := stsList.Items[0].Spec.Template.Spec - By("Checking created sts pods template with built-in toleration") - Expect(podSpec.Tolerations).Should(HaveLen(1)) - Expect(podSpec.Tolerations[0].Key).To(Equal(testDataPlaneTolerationKey)) - - By("Checking created sts pods template with built-in Affinity") - Expect(podSpec.Affinity.PodAntiAffinity == nil && podSpec.Affinity.PodAffinity == nil).Should(BeTrue()) - Expect(podSpec.Affinity.NodeAffinity).ShouldNot(BeNil()) - Expect(podSpec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Key).To( - 
Equal(testDataPlaneNodeAffinityKey)) - - By("Checking created sts pods template without TopologySpreadConstraints") - Expect(podSpec.TopologySpreadConstraints).Should(BeEmpty()) - - By("Check should create env configmap") - Eventually(func(g Gomega) { - cmList := &corev1.ConfigMapList{} - Expect(k8sClient.List(testCtx.Ctx, cmList, client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.AppConfigTypeLabelKey: "kubeblocks-env", - }, client.InNamespace(clusterKey.Namespace))).Should(Succeed()) - Expect(cmList.Items).ShouldNot(BeEmpty()) - Expect(cmList.Items).Should(HaveLen(len(compNameNDef))) - }).Should(Succeed()) - - By("Checking stateless services") - statelessExpectServices := map[string]ExpectService{ - // TODO: fix me later, proxy should not have internal headless service - testapps.ServiceHeadlessName: {svcType: corev1.ServiceTypeClusterIP, headless: true}, - testapps.ServiceDefaultName: {svcType: corev1.ServiceTypeClusterIP, headless: false}, - } - Eventually(func(g Gomega) { - validateCompSvcList(g, statelessCompName, statelessCompDefName, statelessExpectServices) - }).Should(Succeed()) - - By("Checking stateful types services") - for compName, compNameNDef := range compNameNDef { - if compName == statelessCompName { - continue - } - consensusExpectServices := map[string]ExpectService{ - testapps.ServiceHeadlessName: {svcType: corev1.ServiceTypeClusterIP, headless: true}, - testapps.ServiceDefaultName: {svcType: corev1.ServiceTypeClusterIP, headless: false}, - } - Eventually(func(g Gomega) { - validateCompSvcList(g, compName, compNameNDef, consensusExpectServices) - }).Should(Succeed()) - } - } - - checkAllResourcesCreatedWithRSMEnabled := func(compNameNDef map[string]string) { - createNWaitClusterObj(compNameNDef, func(compName string, factory *testapps.MockClusterFactory) { - factory.SetReplicas(3) - }, true) - By("Check stateless workload has been created") Eventually(testapps.List(&testCtx, generics.RSMSignature, 
client.MatchingLabels{ @@ -2063,19 +1872,6 @@ var _ = Describe("Cluster Controller", func() { By("Checking created rsm pods template without TopologySpreadConstraints") Expect(podSpec.TopologySpreadConstraints).Should(BeEmpty()) - if !intctrlutil.IsRSMEnabled() { - By("Check should create env configmap") - Eventually(func(g Gomega) { - cmList := &corev1.ConfigMapList{} - g.Expect(k8sClient.List(testCtx.Ctx, cmList, client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.AppConfigTypeLabelKey: "kubeblocks-env", - }, client.InNamespace(clusterKey.Namespace))).Should(Succeed()) - g.Expect(cmList.Items).ShouldNot(BeEmpty()) - g.Expect(cmList.Items).Should(HaveLen(len(compNameNDef))) - }).Should(Succeed()) - } - By("Checking stateless services") statelessExpectServices := map[string]ExpectService{ // TODO: fix me later, proxy should not have internal headless service @@ -2133,24 +1929,13 @@ var _ = Describe("Cluster Controller", func() { statefulCompName: statefulCompDefName, replicationCompName: replicationCompDefName, } - if intctrlutil.IsRSMEnabled() { - checkAllResourcesCreatedWithRSMEnabled(compNameNDef) - } else { - checkAllResourcesCreated(compNameNDef) - } + checkAllResourcesCreated(compNameNDef) By("Mocking components' PVCs to bound") var items []client.Object - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - for i := range rsmList.Items { - items = append(items, &rsmList.Items[i]) - } - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - for i := range stsList.Items { - items = append(items, &stsList.Items[i]) - } + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + for i := range rsmList.Items { + items = append(items, &rsmList.Items[i]) } for _, item := range items { compName, ok := item.GetLabels()[constant.KBAppComponentLabelKey] @@ -2218,27 +2003,17 @@ var _ = Describe("Cluster Controller", func() { for _, secret := range secretList.Items { 
checkObject(&secret) } - if !intctrlutil.IsRSMEnabled() { - By("check configmap resources preserved") - Expect(cmList.Items).ShouldNot(BeEmpty()) - for _, cm := range cmList.Items { - checkObject(&cm) - } - } } return pvcList, secretList, cmList } - initPVCList, initSecretList, initCMList := checkPreservedObjects(clusterObj.UID) + initPVCList, initSecretList, _ := checkPreservedObjects(clusterObj.UID) By("create recovering cluster") lastClusterUID := clusterObj.UID - if intctrlutil.IsRSMEnabled() { - checkAllResourcesCreatedWithRSMEnabled(compNameNDef) - } else { - checkAllResourcesCreated(compNameNDef) - } + checkAllResourcesCreated(compNameNDef) + Expect(clusterObj.UID).ShouldNot(Equal(lastClusterUID)) - lastPVCList, lastSecretList, lastCMList := checkPreservedObjects("") + lastPVCList, lastSecretList, _ := checkPreservedObjects("") Expect(outOfOrderEqualFunc(initPVCList.Items, lastPVCList.Items, func(i corev1.PersistentVolumeClaim, j corev1.PersistentVolumeClaim) bool { return i.UID == j.UID @@ -2246,11 +2021,6 @@ var _ = Describe("Cluster Controller", func() { Expect(outOfOrderEqualFunc(initSecretList.Items, lastSecretList.Items, func(i corev1.Secret, j corev1.Secret) bool { return i.UID == j.UID })).Should(BeTrue()) - if !intctrlutil.IsRSMEnabled() { - Expect(outOfOrderEqualFunc(initCMList.Items, lastCMList.Items, func(i corev1.ConfigMap, j corev1.ConfigMap) bool { - return i.UID == j.UID - })).Should(BeTrue()) - } By("delete the cluster and should preserved PVC,Secret,CM resources but result updated the new last applied cluster UID") deleteCluster(appsv1alpha1.Halt) @@ -2618,42 +2388,26 @@ var _ = Describe("Cluster Controller", func() { By("Waiting for the cluster controller to create resources completely") waitForCreatingResourceCompletely(clusterKey, compName) - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - rsm := rsmList.Items[0] - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, 
compName). - SetReplicas(*rsm.Spec.Replicas). - Create(&testCtx).GetObject() - By("mock pod/sts are available and wait for component enter running phase") - mockPods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - testk8s.MockStatefulSetReady(sts) - })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, &rsm, func() { - testk8s.MockRSMReady(&rsm, mockPods...) - })).ShouldNot(HaveOccurred()) - Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - - By("the restore container has been removed from init containers") - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(&rsm), func(g Gomega, tmpRSM *workloads.ReplicatedStateMachine) { - g.Expect(tmpRSM.Spec.Template.Spec.InitContainers).Should(BeEmpty()) - })).Should(Succeed()) - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - sts := stsList.Items[0] - By("mock pod/sts are available and wait for component enter running phase") - testapps.MockConsensusComponentPods(&testCtx, &sts, clusterObj.Name, compName) - Expect(testapps.ChangeObjStatus(&testCtx, &sts, func() { - testk8s.MockStatefulSetReady(&sts) - })).ShouldNot(HaveOccurred()) - Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - - By("the restore container has been removed from init containers") - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(&sts), func(g Gomega, tmpSts *appsv1.StatefulSet) { - g.Expect(tmpSts.Spec.Template.Spec.InitContainers).Should(BeEmpty()) - })).Should(Succeed()) - } + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + rsm := rsmList.Items[0] + sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). + SetReplicas(*rsm.Spec.Replicas). 
+ Create(&testCtx).GetObject() + By("mock pod/sts are available and wait for component enter running phase") + mockPods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) + Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { + testk8s.MockStatefulSetReady(sts) + })).ShouldNot(HaveOccurred()) + Expect(testapps.ChangeObjStatus(&testCtx, &rsm, func() { + testk8s.MockRSMReady(&rsm, mockPods...) + })).ShouldNot(HaveOccurred()) + Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) + + By("the restore container has been removed from init containers") + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(&rsm), func(g Gomega, tmpRSM *workloads.ReplicatedStateMachine) { + g.Expect(tmpRSM.Spec.Template.Spec.InitContainers).Should(BeEmpty()) + })).Should(Succeed()) By("clean up annotations after cluster running") Expect(testapps.GetAndChangeObjStatus(&testCtx, clusterKey, func(tmpCluster *appsv1alpha1.Cluster) { @@ -2695,31 +2449,17 @@ var _ = Describe("Cluster Controller", func() { waitForCreatingResourceCompletely(clusterKey, compDefName) By("Checking statefulSet number") - var sts *appsv1.StatefulSet - if intctrlutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSMItemsCount(&testCtx, clusterKey, 1) - rsm := &rsmList.Items[0] - sts = testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). - SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() - mockPods := testapps.MockReplicationComponentPods(nil, testCtx, sts, clusterObj.Name, compDefName, nil) - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - testk8s.MockStatefulSetReady(sts) - })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) 
- })).ShouldNot(HaveOccurred()) - } else { - stsList := testk8s.ListAndCheckStatefulSetItemsCount(&testCtx, clusterKey, 1) - sts = &stsList.Items[0] - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - testk8s.MockStatefulSetReady(sts) - })).ShouldNot(HaveOccurred()) - for i := int32(0); i < *sts.Spec.Replicas; i++ { - podName := fmt.Sprintf("%s-%d", sts.Name, i) - testapps.MockReplicationComponentPod(nil, testCtx, sts, clusterObj.Name, - compDefName, podName, components.DefaultRole(i)) - } - } + rsmList := testk8s.ListAndCheckRSMItemsCount(&testCtx, clusterKey, 1) + rsm := &rsmList.Items[0] + sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). + SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() + mockPods := testapps.MockReplicationComponentPods(nil, testCtx, sts, clusterObj.Name, compDefName, nil) + Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { + testk8s.MockStatefulSetReady(sts) + })).ShouldNot(HaveOccurred()) + Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { + testk8s.MockRSMReady(rsm, mockPods...) 
+ })).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) }) }) @@ -2854,39 +2594,13 @@ var _ = Describe("Cluster Controller", func() { Name: clusterKey.Name + "-" + consensusCompName, } - if intctrlutil.IsRSMEnabled() { - By("checking workload exists") - Eventually(testapps.CheckObjExists(&testCtx, workloadKey, &workloads.ReplicatedStateMachine{}, true)).Should(Succeed()) - - finalizerName := "test/finalizer" - By("set finalizer for workload to prevent it from deletion") - Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(wl *workloads.ReplicatedStateMachine) { - wl.ObjectMeta.Finalizers = append(wl.ObjectMeta.Finalizers, finalizerName) - })()).ShouldNot(HaveOccurred()) - - By("Delete the cluster") - testapps.DeleteObject(&testCtx, clusterKey, &appsv1alpha1.Cluster{}) - - By("checking cluster keep existing") - Consistently(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, true)).Should(Succeed()) - - By("remove finalizer of sts to get it deleted") - Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(wl *workloads.ReplicatedStateMachine) { - wl.ObjectMeta.Finalizers = nil - })()).ShouldNot(HaveOccurred()) - - By("Wait for the cluster to terminate") - Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, false)).Should(Succeed()) - return - } - - By("checking sts exists") - Eventually(testapps.CheckObjExists(&testCtx, workloadKey, &appsv1.StatefulSet{}, true)).Should(Succeed()) + By("checking workload exists") + Eventually(testapps.CheckObjExists(&testCtx, workloadKey, &workloads.ReplicatedStateMachine{}, true)).Should(Succeed()) finalizerName := "test/finalizer" - By("set finalizer for sts to prevent it from deletion") - Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(sts *appsv1.StatefulSet) { - sts.ObjectMeta.Finalizers = append(sts.ObjectMeta.Finalizers, finalizerName) + By("set finalizer for workload to 
prevent it from deletion") + Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(wl *workloads.ReplicatedStateMachine) { + wl.ObjectMeta.Finalizers = append(wl.ObjectMeta.Finalizers, finalizerName) })()).ShouldNot(HaveOccurred()) By("Delete the cluster") @@ -2896,8 +2610,8 @@ var _ = Describe("Cluster Controller", func() { Consistently(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, true)).Should(Succeed()) By("remove finalizer of sts to get it deleted") - Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(sts *appsv1.StatefulSet) { - sts.ObjectMeta.Finalizers = nil + Expect(testapps.GetAndChangeObj(&testCtx, workloadKey, func(wl *workloads.ReplicatedStateMachine) { + wl.ObjectMeta.Finalizers = nil })()).ShouldNot(HaveOccurred()) By("Wait for the cluster to terminate") diff --git a/controllers/apps/cluster_status_event_handler_test.go b/controllers/apps/cluster_status_event_handler_test.go index 4b059cafb00..dc3acf463ef 100644 --- a/controllers/apps/cluster_status_event_handler_test.go +++ b/controllers/apps/cluster_status_event_handler_test.go @@ -25,14 +25,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controllerutil" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) @@ -181,20 +179,12 @@ var _ = Describe("test cluster Failed/Abnormal phase", func() { By("watch warning event from StatefulSet, but mismatch condition ") // wait for StatefulSet created by cluster controller workloadName := clusterName + "-" + statefulMySQLCompName - var kd string - if controllerutil.IsRSMEnabled() { - kd = constant.RSMKind - Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace}, - func(g Gomega, fetched *workloads.ReplicatedStateMachine) { - g.Expect(fetched.Generation).To(BeEquivalentTo(1)) - })).Should(Succeed()) - } else { - kd = constant.StatefulSetKind - Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace}, - func(g Gomega, fetched *appsv1.StatefulSet) { - g.Expect(fetched.Generation).To(BeEquivalentTo(1)) - })).Should(Succeed()) - } + kd := constant.RSMKind + Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace}, + func(g Gomega, fetched *workloads.ReplicatedStateMachine) { + g.Expect(fetched.Generation).To(BeEquivalentTo(1)) + })).Should(Succeed()) + stsInvolvedObject := corev1.ObjectReference{ Name: workloadName, Kind: kd, diff --git a/controllers/apps/components/base.go b/controllers/apps/components/base.go deleted file mode 100644 index 717c7dedced..00000000000 --- a/controllers/apps/components/base.go +++ /dev/null @@ -1,652 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud 
Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "context" - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" -) - -type componentBase struct { - Client client.Client - Recorder record.EventRecorder - Cluster *appsv1alpha1.Cluster - ClusterVersion *appsv1alpha1.ClusterVersion // building config needs the cluster version - Component 
*component.SynthesizedComponent // built synthesized component, replace it with component workload proto - ComponentSet componentSet - Dag *graph.DAG - WorkloadVertex *ictrltypes.LifecycleVertex // DAG vertex of main workload object -} - -func (c *componentBase) GetName() string { - return c.Component.Name -} - -func (c *componentBase) GetNamespace() string { - return c.Cluster.Namespace -} - -func (c *componentBase) GetClusterName() string { - return c.Cluster.Name -} - -func (c *componentBase) GetDefinitionName() string { - return c.Component.ComponentDef -} - -func (c *componentBase) GetCluster() *appsv1alpha1.Cluster { - return c.Cluster -} - -func (c *componentBase) GetClusterVersion() *appsv1alpha1.ClusterVersion { - return c.ClusterVersion -} - -func (c *componentBase) GetSynthesizedComponent() *component.SynthesizedComponent { - return c.Component -} - -func (c *componentBase) GetConsensusSpec() *appsv1alpha1.ConsensusSetSpec { - return c.Component.ConsensusSpec -} - -func (c *componentBase) GetMatchingLabels() client.MatchingLabels { - return client.MatchingLabels{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppInstanceLabelKey: c.GetClusterName(), - constant.KBAppComponentLabelKey: c.GetName(), - } -} - -func (c *componentBase) GetPhase() appsv1alpha1.ClusterComponentPhase { - if c.Cluster.Status.Components == nil { - return "" - } - if _, ok := c.Cluster.Status.Components[c.GetName()]; !ok { - return "" - } - return c.Cluster.Status.Components[c.GetName()].Phase -} - -func (c *componentBase) SetWorkload(obj client.Object, action *ictrltypes.LifecycleAction, parent *ictrltypes.LifecycleVertex) { - c.WorkloadVertex = c.AddResource(obj, action, parent) -} - -func (c *componentBase) AddResource(obj client.Object, action *ictrltypes.LifecycleAction, - parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - if obj == nil { - panic("try to add nil object") - } - vertex := &ictrltypes.LifecycleVertex{ - Obj: obj, - Action: action, - 
} - c.Dag.AddVertex(vertex) - - if parent != nil { - c.Dag.Connect(parent, vertex) - } - return vertex -} - -func (c *componentBase) CreateResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - return ictrltypes.LifecycleObjectCreate(c.Dag, obj, parent) -} - -func (c *componentBase) DeleteResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - return ictrltypes.LifecycleObjectDelete(c.Dag, obj, parent) -} - -func (c *componentBase) UpdateResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - return ictrltypes.LifecycleObjectUpdate(c.Dag, obj, parent) -} - -func (c *componentBase) PatchResource(obj client.Object, objCopy client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - return ictrltypes.LifecycleObjectPatch(c.Dag, obj, objCopy, parent) -} - -func (c *componentBase) NoopResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { - return ictrltypes.LifecycleObjectNoop(c.Dag, obj, parent) -} - -// ValidateObjectsAction validates the action of objects in dag has been determined. -func (c *componentBase) ValidateObjectsAction() error { - for _, v := range c.Dag.Vertices() { - node, ok := v.(*ictrltypes.LifecycleVertex) - if !ok { - return fmt.Errorf("unexpected vertex type, cluster: %s, component: %s, vertex: %T", - c.GetClusterName(), c.GetName(), v) - } - if node.Obj == nil { - return fmt.Errorf("unexpected nil vertex object, cluster: %s, component: %s, vertex: %T", - c.GetClusterName(), c.GetName(), v) - } - if node.Action == nil { - return fmt.Errorf("unexpected nil vertex action, cluster: %s, component: %s, vertex: %T", - c.GetClusterName(), c.GetName(), v) - } - } - return nil -} - -// ResolveObjectsAction resolves the action of objects in dag to guarantee that all object actions will be determined. 
-func (c *componentBase) ResolveObjectsAction(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - snapshot, err := readCacheSnapshot(reqCtx, cli, c.GetCluster()) - if err != nil { - return err - } - for _, v := range c.Dag.Vertices() { - node, ok := v.(*ictrltypes.LifecycleVertex) - if !ok { - return fmt.Errorf("unexpected vertex type, cluster: %s, component: %s, vertex: %T", - c.GetClusterName(), c.GetName(), v) - } - if node.Action == nil { - if action, err := resolveObjectAction(snapshot, node, cli.Scheme()); err != nil { - return err - } else { - node.Action = action - } - } - } - if c.GetCluster().IsStatusUpdating() { - for _, vertex := range c.Dag.Vertices() { - v, _ := vertex.(*ictrltypes.LifecycleVertex) - // TODO(refactor): fix me, this is a workaround for h-scaling to update stateful set. - if _, ok := v.Obj.(*appsv1.StatefulSet); !ok { - v.Immutable = true - } - } - } - return c.ValidateObjectsAction() -} - -func (c *componentBase) UpdatePDB(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - pdbObjList, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PodDisruptionBudgetSignature, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - for _, v := range ictrltypes.FindAll[*policyv1.PodDisruptionBudget](c.Dag) { - node := v.(*ictrltypes.LifecycleVertex) - pdbProto := node.Obj.(*policyv1.PodDisruptionBudget) - - if pos := slices.IndexFunc(pdbObjList, func(pdbObj *policyv1.PodDisruptionBudget) bool { - return pdbObj.GetName() == pdbProto.GetName() - }); pos < 0 { - node.Action = ictrltypes.ActionCreatePtr() // TODO: Create or Noop? 
- } else { - pdbObj := pdbObjList[pos] - if !reflect.DeepEqual(pdbObj.Spec, pdbProto.Spec) { - pdbObj.Spec = pdbProto.Spec - node.Obj = pdbObj - node.Action = ictrltypes.ActionUpdatePtr() - } - } - } - return nil -} - -func (c *componentBase) UpdateService(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - svcObjList, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.ServiceSignature, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return client.IgnoreNotFound(err) - } - - svcProtoList := ictrltypes.FindAll[*corev1.Service](c.Dag) - - // create new services or update existing services - for _, vertex := range svcProtoList { - node, _ := vertex.(*ictrltypes.LifecycleVertex) - svcProto, _ := node.Obj.(*corev1.Service) - - if pos := slices.IndexFunc(svcObjList, func(svc *corev1.Service) bool { - return svc.GetName() == svcProto.GetName() - }); pos < 0 { - node.Action = ictrltypes.ActionCreatePtr() - } else { - svcObj := svcObjList[pos] - // remove original monitor annotations - if len(svcObj.Annotations) > 0 { - maps.DeleteFunc(svcObj.Annotations, func(k, v string) bool { - return strings.HasPrefix(k, "monitor.kubeblocks.io") - }) - } - mergeAnnotations(svcObj.Annotations, &svcProto.Annotations) - svcObj.Annotations = svcProto.Annotations - svcObj.Spec = svcProto.Spec - node.Obj = svcObj - node.Action = ictrltypes.ActionUpdatePtr() - } - } - - // delete useless services - for _, svc := range svcObjList { - if pos := slices.IndexFunc(svcProtoList, func(vertex graph.Vertex) bool { - node, _ := vertex.(*ictrltypes.LifecycleVertex) - svcProto, _ := node.Obj.(*corev1.Service) - return svcProto.GetName() == svc.GetName() - }); pos < 0 { - c.DeleteResource(svc, nil) - } - } - return nil -} - -// SetStatusPhase sets the cluster component phase and messages conditionally. 
-func (c *componentBase) SetStatusPhase(phase appsv1alpha1.ClusterComponentPhase, - statusMessage appsv1alpha1.ComponentMessageMap, phaseTransitionMsg string) { - updatefn := func(status *appsv1alpha1.ClusterComponentStatus) error { - if status.Phase == phase { - return nil - } - status.Phase = phase - if status.Message == nil { - status.Message = statusMessage - } else { - for k, v := range statusMessage { - status.Message[k] = v - } - } - return nil - } - if err := c.updateStatus(phaseTransitionMsg, updatefn); err != nil { - panic(fmt.Sprintf("unexpected error occurred while updating component status: %s", err.Error())) - } -} - -func (c *componentBase) StatusWorkload(reqCtx intctrlutil.RequestCtx, cli client.Client, obj client.Object, txn *statusReconciliationTxn) error { - // if reflect.ValueOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { - // return nil - // } - - pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return err - } - - isRunning, err := c.ComponentSet.IsRunning(reqCtx.Ctx, obj) - if err != nil { - return err - } - - var podsReady *bool - if c.Component.Replicas > 0 { - podsReadyForComponent, err := c.ComponentSet.PodsReady(reqCtx.Ctx, obj) - if err != nil { - return err - } - podsReady = &podsReadyForComponent - } - - hasFailedPodTimedOut := false - timedOutPodStatusMessage := appsv1alpha1.ComponentMessageMap{} - var delayedRequeueError error - isLatestWorkload := obj.GetAnnotations()[constant.KubeBlocksGenerationKey] == strconv.FormatInt(c.Cluster.Generation, 10) - // check if it is the latest obj after cluster does updates. 
- if !isRunning && !appsv1alpha1.ComponentPodsAreReady(podsReady) && isLatestWorkload { - var requeueAfter time.Duration - if hasFailedPodTimedOut, timedOutPodStatusMessage, requeueAfter = hasFailedAndTimedOutPod(pods); requeueAfter != 0 { - delayedRequeueError = intctrlutil.NewDelayedRequeueError(requeueAfter, "requeue for workload status to reconcile.") - } - } - - phase, statusMessage, err := c.buildStatus(reqCtx.Ctx, pods, isRunning, podsReady, hasFailedPodTimedOut, timedOutPodStatusMessage) - if err != nil { - if !intctrlutil.IsDelayedRequeueError(err) { - return err - } - delayedRequeueError = err - } - - phaseTransitionCondMsg := "" - if podsReady == nil { - phaseTransitionCondMsg = fmt.Sprintf("Running: %v, PodsReady: nil, PodsTimedout: %v", isRunning, hasFailedPodTimedOut) - } else { - phaseTransitionCondMsg = fmt.Sprintf("Running: %v, PodsReady: %v, PodsTimedout: %v", isRunning, *podsReady, hasFailedPodTimedOut) - } - - updatefn := func(status *appsv1alpha1.ClusterComponentStatus) error { - if phase != "" { - status.Phase = phase - } - status.SetMessage(statusMessage) - if !appsv1alpha1.ComponentPodsAreReady(podsReady) { - status.PodsReadyTime = nil - } else if !appsv1alpha1.ComponentPodsAreReady(status.PodsReady) { - // set podsReadyTime when pods of component are ready at the moment. - status.PodsReadyTime = &metav1.Time{Time: time.Now()} - } - status.PodsReady = podsReady - return nil - } - - if txn != nil { - txn.propose(phase, func() { - if err = c.updateStatus(phaseTransitionCondMsg, updatefn); err != nil { - panic(fmt.Sprintf("unexpected error occurred while updating component status: %s", err.Error())) - } - }) - return delayedRequeueError - } - // TODO(refactor): wait = true to requeue. 
- if err = c.updateStatus(phaseTransitionCondMsg, updatefn); err != nil { - return err - } - return delayedRequeueError -} - -func (c *componentBase) buildStatus(ctx context.Context, pods []*corev1.Pod, isRunning bool, podsReady *bool, - hasFailedPodTimedOut bool, timedOutPodStatusMessage appsv1alpha1.ComponentMessageMap) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - var ( - err error - phase appsv1alpha1.ClusterComponentPhase - statusMessage appsv1alpha1.ComponentMessageMap - ) - if isRunning { - if c.Component.Replicas == 0 { - // if replicas number of component is zero, the component has stopped. - // 'Stopped' is a special 'Running' status for workload(StatefulSet/Deployment). - phase = appsv1alpha1.StoppedClusterCompPhase - } else { - // change component phase to Running when workloads of component are running. - phase = appsv1alpha1.RunningClusterCompPhase - } - return phase, statusMessage, nil - } - - if appsv1alpha1.ComponentPodsAreReady(podsReady) { - // check if the role probe timed out when component phase is not Running but all pods of component are ready. - phase, statusMessage = c.ComponentSet.GetPhaseWhenPodsReadyAndProbeTimeout(pods) - // if component is not running and probe is not timed out, requeue. - if phase == "" { - c.Recorder.Event(c.Cluster, corev1.EventTypeNormal, "WaitingForProbeSuccess", "Waiting for probe success") - return phase, statusMessage, intctrlutil.NewDelayedRequeueError(time.Second*10, "Waiting for probe success") - } - return phase, statusMessage, nil - } - - // get the phase if failed pods have timed out or the pods are not running when there are no changes to the component. 
- originPhaseIsUpRunning := slices.Contains(appsv1alpha1.GetComponentUpRunningPhase(), c.GetPhase()) - if hasFailedPodTimedOut || originPhaseIsUpRunning { - phase, statusMessage, err = c.ComponentSet.GetPhaseWhenPodsNotReady(ctx, c.GetName(), originPhaseIsUpRunning) - if err != nil { - return "", nil, err - } - } - if statusMessage == nil { - statusMessage = timedOutPodStatusMessage - } else { - for k, v := range timedOutPodStatusMessage { - statusMessage[k] = v - } - } - return phase, statusMessage, nil -} - -// updateStatus updates the cluster component status by @updatefn, with additional message to explain the transition occurred. -func (c *componentBase) updateStatus(phaseTransitionMsg string, updatefn func(status *appsv1alpha1.ClusterComponentStatus) error) error { - if updatefn == nil { - return nil - } - - status := c.getComponentStatus() - phase := status.Phase - err := updatefn(&status) - if err != nil { - return err - } - c.Cluster.Status.Components[c.GetName()] = status - - if phase != status.Phase { - // TODO: logging the event - if c.Recorder != nil && phaseTransitionMsg != "" { - c.Recorder.Eventf(c.Cluster, corev1.EventTypeNormal, ComponentPhaseTransition, phaseTransitionMsg) - } - } - - return nil -} - -func (c *componentBase) getComponentStatus() appsv1alpha1.ClusterComponentStatus { - if c.Cluster.Status.Components == nil { - c.Cluster.Status.Components = make(map[string]appsv1alpha1.ClusterComponentStatus) - } - if _, ok := c.Cluster.Status.Components[c.GetName()]; !ok { - c.Cluster.Status.Components[c.GetName()] = appsv1alpha1.ClusterComponentStatus{} - } - return c.Cluster.Status.Components[c.GetName()] -} - -// hasFailedAndTimedOutPod returns whether the pods of components are still failed after a PodFailedTimeout period. 
-func hasFailedAndTimedOutPod(pods []*corev1.Pod) (bool, appsv1alpha1.ComponentMessageMap, time.Duration) { - var ( - hasTimedOutPod bool - messages = appsv1alpha1.ComponentMessageMap{} - hasFailedPod bool - requeueAfter time.Duration - ) - for _, pod := range pods { - isFailed, isTimedOut, messageStr := IsPodFailedAndTimedOut(pod) - if !isFailed { - continue - } - if isTimedOut { - hasTimedOutPod = true - messages.SetObjectMessage(pod.Kind, pod.Name, messageStr) - } else { - hasFailedPod = true - } - } - if hasFailedPod && !hasTimedOutPod { - requeueAfter = PodContainerFailedTimeout - } - return hasTimedOutPod, messages, requeueAfter -} - -// isPodScheduledFailedAndTimedOut checks whether the unscheduled pod has timed out. -func isPodScheduledFailedAndTimedOut(pod *corev1.Pod) (bool, bool, string) { - for _, cond := range pod.Status.Conditions { - if cond.Type != corev1.PodScheduled { - continue - } - if cond.Status == corev1.ConditionTrue { - return false, false, "" - } - return true, time.Now().After(cond.LastTransitionTime.Add(PodScheduledFailedTimeout)), cond.Message - } - return false, false, "" -} - -// IsPodFailedAndTimedOut checks if the pod is failed and timed out. -func IsPodFailedAndTimedOut(pod *corev1.Pod) (bool, bool, string) { - if isFailed, isTimedOut, message := isPodScheduledFailedAndTimedOut(pod); isFailed { - return isFailed, isTimedOut, message - } - initContainerFailed, message := isAnyContainerFailed(pod.Status.InitContainerStatuses) - if initContainerFailed { - return initContainerFailed, isContainerFailedAndTimedOut(pod, corev1.PodInitialized), message - } - containerFailed, message := isAnyContainerFailed(pod.Status.ContainerStatuses) - if containerFailed { - return containerFailed, isContainerFailedAndTimedOut(pod, corev1.ContainersReady), message - } - return false, false, "" -} - -// isAnyContainerFailed checks whether any container in the list is failed. 
-func isAnyContainerFailed(containersStatus []corev1.ContainerStatus) (bool, string) { - for _, v := range containersStatus { - waitingState := v.State.Waiting - if waitingState != nil && waitingState.Message != "" { - return true, waitingState.Message - } - terminatedState := v.State.Terminated - if terminatedState != nil && terminatedState.Message != "" { - return true, terminatedState.Message - } - } - return false, "" -} - -// isContainerFailedAndTimedOut checks whether the failed container has timed out. -func isContainerFailedAndTimedOut(pod *corev1.Pod, podConditionType corev1.PodConditionType) bool { - containerReadyCondition := intctrlutil.GetPodCondition(&pod.Status, podConditionType) - if containerReadyCondition == nil || containerReadyCondition.LastTransitionTime.IsZero() { - return false - } - return time.Now().After(containerReadyCondition.LastTransitionTime.Add(PodContainerFailedTimeout)) -} - -type gvkName struct { - gvk schema.GroupVersionKind - ns, name string -} - -type clusterSnapshot map[gvkName]client.Object - -func getGVKName(object client.Object, scheme *runtime.Scheme) (*gvkName, error) { - gvk, err := apiutil.GVKForObject(object, scheme) - if err != nil { - return nil, err - } - return &gvkName{ - gvk: gvk, - ns: object.GetNamespace(), - name: object.GetName(), - }, nil -} - -func isOwnerOf(owner, obj client.Object, scheme *runtime.Scheme) bool { - ro, ok := owner.(runtime.Object) - if !ok { - return false - } - gvk, err := apiutil.GVKForObject(ro, scheme) - if err != nil { - return false - } - ref := metav1.OwnerReference{ - APIVersion: gvk.GroupVersion().String(), - Kind: gvk.Kind, - UID: owner.GetUID(), - Name: owner.GetName(), - } - owners := obj.GetOwnerReferences() - referSameObject := func(a, b metav1.OwnerReference) bool { - aGV, err := schema.ParseGroupVersion(a.APIVersion) - if err != nil { - return false - } - - bGV, err := schema.ParseGroupVersion(b.APIVersion) - if err != nil { - return false - } - - return aGV.Group == 
bGV.Group && a.Kind == b.Kind && a.Name == b.Name - } - for _, ownerRef := range owners { - if referSameObject(ownerRef, ref) { - return true - } - } - return false -} - -func ownedKinds() []client.ObjectList { - return []client.ObjectList{ - &appsv1.StatefulSetList{}, - &appsv1.DeploymentList{}, - &corev1.ServiceList{}, - &corev1.SecretList{}, - &corev1.ConfigMapList{}, - &corev1.PersistentVolumeClaimList{}, // TODO(merge): remove it? - &policyv1.PodDisruptionBudgetList{}, - &dataprotectionv1alpha1.BackupPolicyList{}, - } -} - -// read all objects owned by component -func readCacheSnapshot(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster) (clusterSnapshot, error) { - // list what kinds of object cluster owns - kinds := ownedKinds() - snapshot := make(clusterSnapshot) - ml := client.MatchingLabels{constant.AppInstanceLabelKey: cluster.GetName()} - inNS := client.InNamespace(cluster.Namespace) - for _, list := range kinds { - if err := cli.List(reqCtx.Ctx, list, inNS, ml); err != nil { - return nil, err - } - // reflect get list.Items - items := reflect.ValueOf(list).Elem().FieldByName("Items") - l := items.Len() - for i := 0; i < l; i++ { - // get the underlying object - object := items.Index(i).Addr().Interface().(client.Object) - // put to snapshot if owned by our cluster - if isOwnerOf(cluster, object, cli.Scheme()) { - name, err := getGVKName(object, cli.Scheme()) - if err != nil { - return nil, err - } - snapshot[*name] = object - } - } - } - return snapshot, nil -} - -func resolveObjectAction(snapshot clusterSnapshot, vertex *ictrltypes.LifecycleVertex, scheme *runtime.Scheme) (*ictrltypes.LifecycleAction, error) { - gvk, err := getGVKName(vertex.Obj, scheme) - if err != nil { - return nil, err - } - if _, ok := snapshot[*gvk]; ok { - return ictrltypes.ActionNoopPtr(), nil - } else { - return ictrltypes.ActionCreatePtr(), nil - } -} diff --git a/controllers/apps/components/base_stateful.go 
b/controllers/apps/components/base_stateful.go deleted file mode 100644 index 3d1806a8a70..00000000000 --- a/controllers/apps/components/base_stateful.go +++ /dev/null @@ -1,999 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "golang.org/x/exp/maps" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubectl/pkg/util/podutils" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" - "github.com/apecloud/kubeblocks/internal/configuration/util" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" - viper "github.com/apecloud/kubeblocks/internal/viperx" 
- lorry "github.com/apecloud/kubeblocks/lorry/client" -) - -// rsmComponentBase as a base class for single rsm based component (stateful & replication & consensus). -type rsmComponentBase struct { - componentBase - // runningWorkload can be nil, and the replicas of workload can be nil (zero) - runningWorkload *workloads.ReplicatedStateMachine -} - -func (c *rsmComponentBase) init(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder, load bool) error { - var err error - if builder != nil { - if err = builder.BuildEnv(). - BuildWorkload(). - BuildPDB(). - BuildConfig(). - BuildTLSVolume(). - BuildVolumeMount(). - BuildTLSCert(). - Complete(); err != nil { - return err - } - } - if load { - c.runningWorkload, err = c.loadRunningWorkload(reqCtx, cli) - if err != nil { - return err - } - } - return nil -} - -func (c *rsmComponentBase) loadRunningWorkload(reqCtx intctrlutil.RequestCtx, cli client.Client) (*workloads.ReplicatedStateMachine, error) { - rsmList, err := listRSMOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return nil, err - } - cnt := len(rsmList) - switch { - case cnt == 0: - return nil, nil - case cnt == 1: - return rsmList[0], nil - default: - return nil, fmt.Errorf("more than one workloads found for the component, cluster: %s, component: %s, cnt: %d", - c.GetClusterName(), c.GetName(), cnt) - } -} - -func (c *rsmComponentBase) GetBuiltObjects(builder componentWorkloadBuilder) ([]client.Object, error) { - dagSnapshot := c.Dag - defer func() { - c.Dag = dagSnapshot - }() - - c.Dag = graph.NewDAG() - if err := c.init(intctrlutil.RequestCtx{}, nil, builder, false); err != nil { - return nil, err - } - - objs := make([]client.Object, 0) - for _, v := range c.Dag.Vertices() { - if vv, ok := v.(*ictrltypes.LifecycleVertex); ok { - objs = append(objs, vv.Obj) - } - } - return objs, nil -} - -func (c *rsmComponentBase) Create(reqCtx intctrlutil.RequestCtx, cli client.Client, builder 
componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, false); err != nil { - return err - } - - if err := c.ValidateObjectsAction(); err != nil { - return err - } - - return nil -} - -func (c *rsmComponentBase) Delete(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - // TODO(impl): delete component owned resources - return nil -} - -func (c *rsmComponentBase) Update(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, true); err != nil { - return err - } - - if c.runningWorkload != nil { - if err := c.Restart(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].volumeClaimTemplates[*].spec.resources.requests[corev1.ResourceStorage] - if err := c.ExpandVolume(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].replicas - if err := c.HorizontalScale(reqCtx, cli); err != nil { - return err - } - } - - if err := c.updateUnderlyingResources(reqCtx, cli, c.runningWorkload); err != nil { - return err - } - - return c.ResolveObjectsAction(reqCtx, cli) -} - -func (c *rsmComponentBase) Status(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, true); err != nil { - return err - } - if c.runningWorkload == nil { - return nil - } - - isDeleting := func() bool { - return !c.runningWorkload.DeletionTimestamp.IsZero() - }() - isZeroReplica := func() bool { - return (c.runningWorkload.Spec.Replicas == nil || *c.runningWorkload.Spec.Replicas == 0) && c.Component.Replicas == 0 - }() - pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return err - } - hasComponentPod := func() bool { - return len(pods) > 0 - }() - isRunning, err := c.ComponentSet.IsRunning(reqCtx.Ctx, c.runningWorkload) - if err != nil { - return err - } - isAllConfigSynced := c.isAllConfigSynced(reqCtx, 
cli) - hasFailedPod, messages, err := c.hasFailedPod(reqCtx, cli, pods) - if err != nil { - return err - } - isScaleOutFailed, err := c.isScaleOutFailed(reqCtx, cli) - if err != nil { - return err - } - hasRunningVolumeExpansion, hasFailedVolumeExpansion, err := c.hasVolumeExpansionRunning(reqCtx, cli) - if err != nil { - return err - } - hasFailure := func() bool { - return hasFailedPod || isScaleOutFailed || hasFailedVolumeExpansion - }() - isComponentAvailable, err := c.isAvailable(reqCtx, cli, pods) - if err != nil { - return err - } - isInCreatingPhase := func() bool { - phase := c.getComponentStatus().Phase - return phase == "" || phase == appsv1alpha1.CreatingClusterCompPhase - }() - - updatePodsReady := func(ready bool) { - _ = c.updateStatus("", func(status *appsv1alpha1.ClusterComponentStatus) error { - // if ready flag not changed, don't update the ready time - if status.PodsReady != nil && *status.PodsReady == ready { - return nil - } - status.PodsReady = &ready - if ready { - time := metav1.Now() - status.PodsReadyTime = &time - } - return nil - }) - } - - podsReady := false - switch { - case isDeleting: - c.SetStatusPhase(appsv1alpha1.DeletingClusterCompPhase, nil, "Component is Deleting") - case isZeroReplica && hasComponentPod: - c.SetStatusPhase(appsv1alpha1.StoppingClusterCompPhase, nil, "Component is Stopping") - podsReady = true - case isZeroReplica: - c.SetStatusPhase(appsv1alpha1.StoppedClusterCompPhase, nil, "Component is Stopped") - podsReady = true - case isRunning && isAllConfigSynced && !hasRunningVolumeExpansion: - c.SetStatusPhase(appsv1alpha1.RunningClusterCompPhase, nil, "Component is Running") - podsReady = true - case !hasFailure && isInCreatingPhase: - c.SetStatusPhase(appsv1alpha1.CreatingClusterCompPhase, nil, "Create a new component") - case !hasFailure: - c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Component is Updating") - case !isComponentAvailable: - c.SetStatusPhase(appsv1alpha1.FailedClusterCompPhase, 
messages, "Component is Failed") - default: - c.SetStatusPhase(appsv1alpha1.AbnormalClusterCompPhase, nil, "unknown") - } - updatePodsReady(podsReady) - - // works should continue to be done after spec updated. - if err := c.horizontalScale(reqCtx, cli); err != nil { - return err - } - - if vertexes, err := c.ComponentSet.HandleRoleChange(reqCtx.Ctx, c.runningWorkload); err != nil { - return err - } else { - for _, v := range vertexes { - c.Dag.AddVertex(v) - } - } - - c.updateWorkload(c.runningWorkload) - - // update component info to pods' annotations - if err := updateComponentInfoToPods(reqCtx.Ctx, cli, c.Cluster, c.Component, c.Dag); err != nil { - return err - } - - // patch the current componentSpec workload's custom labels - if err := updateCustomLabelToPods(reqCtx.Ctx, cli, c.Cluster, c.Component, c.Dag); err != nil { - reqCtx.Event(c.Cluster, corev1.EventTypeWarning, "Component Workload Controller PatchWorkloadCustomLabelFailed", err.Error()) - return err - } - - return nil -} - -// isAvailable tells whether the component is basically available, ether working well or in a fragile state: -// 1. at least one pod is available -// 2. with latest revision -// 3. 
and with leader role label set -func (c *rsmComponentBase) isAvailable(reqCtx intctrlutil.RequestCtx, cli client.Client, pods []*corev1.Pod) (bool, error) { - if isLatestRevision, err := IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, c.Cluster, c.runningWorkload); err != nil { - return false, err - } else if !isLatestRevision { - return false, nil - } - - shouldCheckLeader := func() bool { - return c.Component.WorkloadType == appsv1alpha1.Consensus || c.Component.WorkloadType == appsv1alpha1.Replication - }() - hasLeaderRoleLabel := func(pod *corev1.Pod) bool { - roleName, ok := pod.Labels[constant.RoleLabelKey] - if !ok { - return false - } - for _, replicaRole := range c.runningWorkload.Spec.Roles { - if roleName == replicaRole.Name && replicaRole.IsLeader { - return true - } - } - return false - } - for _, pod := range pods { - if !podutils.IsPodAvailable(pod, 0, metav1.Time{Time: time.Now()}) { - continue - } - if !shouldCheckLeader { - continue - } - if _, ok := pod.Labels[constant.RoleLabelKey]; ok { - continue - } - if hasLeaderRoleLabel(pod) { - return true, nil - } - } - return false, nil -} - -func (c *rsmComponentBase) hasFailedPod(reqCtx intctrlutil.RequestCtx, cli client.Client, pods []*corev1.Pod) (bool, appsv1alpha1.ComponentMessageMap, error) { - if isLatestRevision, err := IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, c.Cluster, c.runningWorkload); err != nil { - return false, nil, err - } else if !isLatestRevision { - return false, nil, nil - } - - var messages appsv1alpha1.ComponentMessageMap - // check pod readiness - hasFailedPod, msg, _ := hasFailedAndTimedOutPod(pods) - if hasFailedPod { - messages = msg - return true, messages, nil - } - // check role probe - if c.Component.WorkloadType != appsv1alpha1.Consensus && c.Component.WorkloadType != appsv1alpha1.Replication { - return false, messages, nil - } - hasProbeTimeout := false - for _, pod := range pods { - if _, ok := pod.Labels[constant.RoleLabelKey]; ok { - continue - } - for 
_, condition := range pod.Status.Conditions { - if condition.Type != corev1.PodReady || condition.Status != corev1.ConditionTrue { - continue - } - podsReadyTime := &condition.LastTransitionTime - if isProbeTimeout(c.Component.Probes, podsReadyTime) { - hasProbeTimeout = true - if messages == nil { - messages = appsv1alpha1.ComponentMessageMap{} - } - messages.SetObjectMessage(pod.Kind, pod.Name, "Role probe timeout, check whether the application is available") - } - } - } - return hasProbeTimeout, messages, nil -} - -func (c *rsmComponentBase) isScaleOutFailed(reqCtx intctrlutil.RequestCtx, cli client.Client) (bool, error) { - if c.runningWorkload.Spec.Replicas == nil { - return false, nil - } - if c.Component.Replicas <= *c.runningWorkload.Spec.Replicas { - return false, nil - } - if c.WorkloadVertex == nil { - return false, nil - } - stsObj := ConvertRSMToSTS(c.runningWorkload) - rsmProto := c.WorkloadVertex.Obj.(*workloads.ReplicatedStateMachine) - stsProto := ConvertRSMToSTS(rsmProto) - backupKey := types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: stsObj.Name + "-scaling", - } - d, err := newDataClone(reqCtx, cli, c.Cluster, c.Component, stsObj, stsProto, backupKey) - if err != nil { - return false, err - } - if status, err := d.checkBackupStatus(); err != nil { - return false, err - } else if status == backupStatusFailed { - return true, nil - } - for _, name := range d.pvcKeysToRestore() { - if status, err := d.checkRestoreStatus(name); err != nil { - return false, err - } else if status == backupStatusFailed { - return true, nil - } - } - return false, nil -} - -func (c *rsmComponentBase) Restart(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return restartPod(&c.runningWorkload.Spec.Template) -} - -func (c *rsmComponentBase) ExpandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { - var proto *corev1.PersistentVolumeClaimTemplate - for _, v := range 
c.Component.VolumeClaimTemplates { - if v.Name == vct.Name { - proto = &v - break - } - } - // REVIEW: seems we can remove a volume claim from templates at runtime, without any changes and warning messages? - if proto == nil { - continue - } - - if err := c.expandVolumes(reqCtx, cli, vct.Name, proto); err != nil { - return err - } - } - return nil -} - -func (c *rsmComponentBase) expandVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, - vctName string, proto *corev1.PersistentVolumeClaimTemplate) error { - pvcNotFound := false - for i := *c.runningWorkload.Spec.Replicas - 1; i >= 0; i-- { - pvc := &corev1.PersistentVolumeClaim{} - pvcKey := types.NamespacedName{ - Namespace: c.GetNamespace(), - Name: fmt.Sprintf("%s-%s-%d", vctName, c.runningWorkload.Name, i), - } - if err := cli.Get(reqCtx.Ctx, pvcKey, pvc); err != nil { - if apierrors.IsNotFound(err) { - pvcNotFound = true - } else { - return err - } - } - if err := c.updatePVCSize(reqCtx, cli, pvcKey, pvc, pvcNotFound, proto); err != nil { - return err - } - } - return nil -} - -func (c *rsmComponentBase) updatePVCSize(reqCtx intctrlutil.RequestCtx, cli client.Client, pvcKey types.NamespacedName, - pvc *corev1.PersistentVolumeClaim, pvcNotFound bool, vctProto *corev1.PersistentVolumeClaimTemplate) error { - // reference: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#recovering-from-failure-when-expanding-volumes - // 1. Mark the PersistentVolume(PV) that is bound to the PersistentVolumeClaim(PVC) with Retain reclaim policy. - // 2. Delete the PVC. Since PV has Retain reclaim policy - we will not lose any data when we recreate the PVC. - // 3. Delete the claimRef entry from PV specs, so as new PVC can bind to it. This should make the PV Available. - // 4. Re-create the PVC with smaller size than PV and set volumeName field of the PVC to the name of the PV. This should bind new PVC to existing PV. - // 5. Don't forget to restore the reclaim policy of the PV. 
- newPVC := pvc.DeepCopy() - if pvcNotFound { - newPVC.Name = pvcKey.Name - newPVC.Namespace = pvcKey.Namespace - newPVC.SetLabels(vctProto.Labels) - newPVC.Spec = vctProto.Spec - ml := client.MatchingLabels{ - constant.PVCNameLabelKey: pvcKey.Name, - } - pvList := corev1.PersistentVolumeList{} - if err := cli.List(reqCtx.Ctx, &pvList, ml); err != nil { - return err - } - for _, pv := range pvList.Items { - // find pv referenced this pvc - if pv.Spec.ClaimRef == nil { - continue - } - if pv.Spec.ClaimRef.Name == pvcKey.Name { - newPVC.Spec.VolumeName = pv.Name - break - } - } - } else { - newPVC.Spec.Resources.Requests[corev1.ResourceStorage] = vctProto.Spec.Resources.Requests[corev1.ResourceStorage] - // delete annotation to make it re-bind - delete(newPVC.Annotations, "pv.kubernetes.io/bind-completed") - } - - pvNotFound := false - - // step 1: update pv to retain - pv := &corev1.PersistentVolume{} - pvKey := types.NamespacedName{ - Namespace: pvcKey.Namespace, - Name: newPVC.Spec.VolumeName, - } - if err := cli.Get(reqCtx.Ctx, pvKey, pv); err != nil { - if apierrors.IsNotFound(err) { - pvNotFound = true - } else { - return err - } - } - - type pvcRecreateStep int - const ( - pvPolicyRetainStep pvcRecreateStep = iota - deletePVCStep - removePVClaimRefStep - createPVCStep - pvRestorePolicyStep - ) - - addStepMap := map[pvcRecreateStep]func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex{ - pvPolicyRetainStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 1: update pv to retain - retainPV := pv.DeepCopy() - if retainPV.Labels == nil { - retainPV.Labels = make(map[string]string) - } - // add label to pv, in case pvc get deleted, and we can't find pv - retainPV.Labels[constant.PVCNameLabelKey] = pvcKey.Name - if retainPV.Annotations == nil { - retainPV.Annotations = make(map[string]string) - } - retainPV.Annotations[constant.PVLastClaimPolicyAnnotationKey] = 
string(pv.Spec.PersistentVolumeReclaimPolicy) - retainPV.Spec.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimRetain - return c.PatchResource(retainPV, pv, fromVertex) - }, - deletePVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 2: delete pvc, this will not delete pv because policy is 'retain' - removeFinalizerPVC := pvc.DeepCopy() - removeFinalizerPVC.SetFinalizers([]string{}) - removeFinalizerPVCVertex := c.PatchResource(removeFinalizerPVC, pvc, fromVertex) - return c.DeleteResource(pvc, removeFinalizerPVCVertex) - }, - removePVClaimRefStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 3: remove claimRef in pv - removeClaimRefPV := pv.DeepCopy() - if removeClaimRefPV.Spec.ClaimRef != nil { - removeClaimRefPV.Spec.ClaimRef.UID = "" - removeClaimRefPV.Spec.ClaimRef.ResourceVersion = "" - } - return c.PatchResource(removeClaimRefPV, pv, fromVertex) - }, - createPVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 4: create new pvc - newPVC.SetResourceVersion("") - return c.CreateResource(newPVC, fromVertex) - }, - pvRestorePolicyStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 5: restore to previous pv policy - restorePV := pv.DeepCopy() - policy := corev1.PersistentVolumeReclaimPolicy(restorePV.Annotations[constant.PVLastClaimPolicyAnnotationKey]) - if len(policy) == 0 { - policy = corev1.PersistentVolumeReclaimDelete - } - restorePV.Spec.PersistentVolumeReclaimPolicy = policy - return c.PatchResource(restorePV, pv, fromVertex) - }, - } - - updatePVCByRecreateFromStep := func(fromStep pvcRecreateStep) { - lastVertex := c.WorkloadVertex - for step := pvRestorePolicyStep; step >= fromStep && step >= pvPolicyRetainStep; step-- { - lastVertex = addStepMap[step](lastVertex, step) - } - } - - 
targetQuantity := vctProto.Spec.Resources.Requests[corev1.ResourceStorage] - if pvcNotFound && !pvNotFound { - // this could happen if create pvc step failed when recreating pvc - updatePVCByRecreateFromStep(removePVClaimRefStep) - return nil - } - if pvcNotFound && pvNotFound { - // if both pvc and pv not found, do nothing - return nil - } - if reflect.DeepEqual(pvc.Spec.Resources, newPVC.Spec.Resources) && pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain { - // this could happen if create pvc succeeded but last step failed - updatePVCByRecreateFromStep(pvRestorePolicyStep) - return nil - } - if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; !viper.GetBool(constant.CfgRecoverVolumeExpansionFailure) && - pvcQuantity.Cmp(targetQuantity) == 1 && // check if it's compressing volume - targetQuantity.Cmp(*pvc.Status.Capacity.Storage()) >= 0 { // check if target size is greater than or equal to actual size - // this branch means we can update pvc size by recreate it - updatePVCByRecreateFromStep(pvPolicyRetainStep) - return nil - } - if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; pvcQuantity.Cmp(vctProto.Spec.Resources.Requests[corev1.ResourceStorage]) != 0 { - // use pvc's update without anything extra - c.UpdateResource(newPVC, c.WorkloadVertex) - return nil - } - // all the else means no need to update - - return nil -} - -func (c *rsmComponentBase) isAllConfigSynced(reqCtx intctrlutil.RequestCtx, cli client.Client) bool { - checkFinishedReconfigure := func(cm *corev1.ConfigMap) bool { - labels := cm.GetLabels() - annotations := cm.GetAnnotations() - if len(annotations) == 0 || len(labels) == 0 { - return false - } - hash, _ := util.ComputeHash(cm.Data) - return labels[constant.CMInsConfigurationHashLabelKey] == hash - } - - var ( - cmKey client.ObjectKey - cmObj = &corev1.ConfigMap{} - allConfigSynced = true - ) - for _, configSpec := range c.Component.ConfigTemplates { - cmKey = 
client.ObjectKey{ - Namespace: c.GetNamespace(), - Name: cfgcore.GetComponentCfgName(c.GetClusterName(), c.GetName(), configSpec.Name), - } - if err := cli.Get(reqCtx.Ctx, cmKey, cmObj); err != nil { - return true - } - if !checkFinishedReconfigure(cmObj) { - allConfigSynced = false - break - } - } - return allConfigSynced -} - -func (c *rsmComponentBase) hasVolumeExpansionRunning(reqCtx intctrlutil.RequestCtx, cli client.Client) (bool, bool, error) { - var ( - running bool - failed bool - ) - for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { - volumes, err := c.getRunningVolumes(reqCtx, cli, vct.Name, c.runningWorkload) - if err != nil { - return false, false, err - } - for _, v := range volumes { - if v.Status.Capacity == nil || v.Status.Capacity.Storage().Cmp(v.Spec.Resources.Requests[corev1.ResourceStorage]) >= 0 { - continue - } - running = true - // TODO: how to check the expansion failed? - } - } - return running, failed, nil -} - -func (c *rsmComponentBase) HorizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.horizontalScale(reqCtx, cli) -} - -func (c *rsmComponentBase) horizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - sts := ConvertRSMToSTS(c.runningWorkload) - if sts.Status.ReadyReplicas == c.Component.Replicas { - return nil - } - ret := c.horizontalScaling(sts) - if ret == 0 { - if err := c.postScaleIn(reqCtx, cli); err != nil { - return err - } - if err := c.postScaleOut(reqCtx, cli, sts); err != nil { - return err - } - return nil - } - if ret < 0 { - if err := c.scaleIn(reqCtx, cli, sts); err != nil { - return err - } - } else { - if err := c.scaleOut(reqCtx, cli, sts); err != nil { - return err - } - } - - if err := c.updatePodReplicaLabel4Scaling(reqCtx, cli, c.Component.Replicas); err != nil { - return err - } - - // update KB___ env needed by pod to obtain hostname. 
- c.updatePodEnvConfig() - - reqCtx.Recorder.Eventf(c.Cluster, - corev1.EventTypeNormal, - "HorizontalScale", - "start horizontal scale component %s of cluster %s from %d to %d", - c.GetName(), c.GetClusterName(), int(c.Component.Replicas)-ret, c.Component.Replicas) - - return nil -} - -// < 0 for scale in, > 0 for scale out, and == 0 for nothing -func (c *rsmComponentBase) horizontalScaling(stsObj *appsv1.StatefulSet) int { - return int(c.Component.Replicas - *stsObj.Spec.Replicas) -} - -func (c *rsmComponentBase) updatePodEnvConfig() { - for _, v := range ictrltypes.FindAll[*corev1.ConfigMap](c.Dag) { - node := v.(*ictrltypes.LifecycleVertex) - // TODO: need a way to reference the env config. - envConfigName := fmt.Sprintf("%s-%s-env", c.GetClusterName(), c.GetName()) - if node.Obj.GetName() == envConfigName { - node.Action = ictrltypes.ActionUpdatePtr() - } - } -} - -func (c *rsmComponentBase) updatePodReplicaLabel4Scaling(reqCtx intctrlutil.RequestCtx, cli client.Client, replicas int32) error { - pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return err - } - for _, pod := range pods { - obj := pod.DeepCopy() - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[constant.ComponentReplicasAnnotationKey] = strconv.Itoa(int(replicas)) - c.UpdateResource(obj, c.WorkloadVertex) - } - return nil -} - -func (c *rsmComponentBase) scaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - // if scale in to 0, do not delete pvcs - if c.Component.Replicas == 0 { - reqCtx.Log.Info("scale in to 0, keep all PVCs") - return nil - } - // TODO: check the component definition to determine whether we need to call leave member before deleting replicas. 
- err := c.leaveMember4ScaleIn(reqCtx, cli, stsObj) - if err != nil { - reqCtx.Log.Info(fmt.Sprintf("leave member at scaling-in error, retry later: %s", err.Error())) - return err - } - return c.deletePVCs4ScaleIn(reqCtx, cli, stsObj) -} - -func (c *rsmComponentBase) postScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return nil -} - -func (c *rsmComponentBase) leaveMember4ScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return err - } - for _, pod := range pods { - subs := strings.Split(pod.Name, "-") - if ordinal, err := strconv.ParseInt(subs[len(subs)-1], 10, 32); err != nil { - return err - } else if int32(ordinal) < c.Component.Replicas { - continue - } - lorryCli, err1 := lorry.NewClient(c.Component.CharacterType, *pod) - if err1 != nil { - if err == nil { - err = err1 - } - continue - } - - if lorryCli == nil { - // no lorry in the pod - continue - } - - if err2 := lorryCli.LeaveMember(reqCtx.Ctx); err2 != nil { - if err == nil { - err = err2 - } - } - } - return err // TODO: use requeue-after -} - -func (c *rsmComponentBase) deletePVCs4ScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - for i := c.Component.Replicas; i < *stsObj.Spec.Replicas; i++ { - for _, vct := range stsObj.Spec.VolumeClaimTemplates { - pvcKey := types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: fmt.Sprintf("%s-%s-%d", vct.Name, stsObj.Name, i), - } - pvc := corev1.PersistentVolumeClaim{} - if err := cli.Get(reqCtx.Ctx, pvcKey, &pvc); err != nil { - return err - } - // Since there are no order guarantee between updating STS and deleting PVCs, if there is any error occurred - // after updating STS and before deleting PVCs, the PVCs intended to scale-in will be leaked. 
- // For simplicity, the updating dependency is added between them to guarantee that the PVCs to scale-in - // will be deleted or the scaling-in operation will be failed. - c.DeleteResource(&pvc, c.WorkloadVertex) - } - } - return nil -} - -func (c *rsmComponentBase) scaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - var ( - backupKey = types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: stsObj.Name + "-scaling", - } - ) - - // sts's replicas=0 means it's starting not scaling, skip all the scaling work. - if *stsObj.Spec.Replicas == 0 { - return nil - } - - c.WorkloadVertex.Immutable = true - rsmProto := c.WorkloadVertex.Obj.(*workloads.ReplicatedStateMachine) - stsProto := ConvertRSMToSTS(rsmProto) - d, err := newDataClone(reqCtx, cli, c.Cluster, c.Component, stsObj, stsProto, backupKey) - if err != nil { - return err - } - var succeed bool - if d == nil { - succeed = true - } else { - succeed, err = d.succeed() - if err != nil { - return err - } - } - if succeed { - // pvcs are ready, rsm.replicas should be updated - c.WorkloadVertex.Immutable = false - return c.postScaleOut(reqCtx, cli, stsObj) - } else { - c.WorkloadVertex.Immutable = true - // update objs will trigger cluster reconcile, no need to requeue error - objs, err := d.cloneData(d) - if err != nil { - return err - } - for _, obj := range objs { - c.CreateResource(obj, nil) - } - return nil - } -} - -func (c *rsmComponentBase) postScaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - var ( - snapshotKey = types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: stsObj.Name + "-scaling", - } - ) - - d, err := newDataClone(reqCtx, cli, c.Cluster, c.Component, stsObj, stsObj, snapshotKey) - if err != nil { - return err - } - if d != nil { - // clean backup resources. - // there will not be any backup resources other than scale out. 
- tmpObjs, err := d.clearTmpResources() - if err != nil { - return err - } - for _, obj := range tmpObjs { - c.DeleteResource(obj, nil) - } - } - - return nil -} - -func (c *rsmComponentBase) updateUnderlyingResources(reqCtx intctrlutil.RequestCtx, cli client.Client, rsmObj *workloads.ReplicatedStateMachine) error { - if rsmObj == nil { - c.createWorkload() - } else { - c.updateWorkload(rsmObj) - // to work around that the scaled PVC will be deleted at object action. - if err := c.updateVolumes(reqCtx, cli, rsmObj); err != nil { - return err - } - } - if err := c.UpdatePDB(reqCtx, cli); err != nil { - return err - } - return nil -} - -func (c *rsmComponentBase) createWorkload() { - rsmProto := c.WorkloadVertex.Obj.(*workloads.ReplicatedStateMachine) - buildWorkLoadAnnotations(rsmProto, c.Cluster) - c.WorkloadVertex.Obj = rsmProto - c.WorkloadVertex.Action = ictrltypes.ActionCreatePtr() -} - -func (c *rsmComponentBase) updateWorkload(rsmObj *workloads.ReplicatedStateMachine) bool { - rsmObjCopy := rsmObj.DeepCopy() - rsmProto := c.WorkloadVertex.Obj.(*workloads.ReplicatedStateMachine) - - // remove original monitor annotations - if len(rsmObjCopy.Annotations) > 0 { - maps.DeleteFunc(rsmObjCopy.Annotations, func(k, v string) bool { - return strings.HasPrefix(k, "monitor.kubeblocks.io") - }) - } - mergeAnnotations(rsmObjCopy.Annotations, &rsmProto.Annotations) - rsmObjCopy.Annotations = rsmProto.Annotations - buildWorkLoadAnnotations(rsmObjCopy, c.Cluster) - - // keep the original template annotations. - // if annotations exist and are replaced, the rsm will be updated. 
- mergeAnnotations(rsmObjCopy.Spec.Template.Annotations, &rsmProto.Spec.Template.Annotations) - rsmObjCopy.Spec.Template = rsmProto.Spec.Template - rsmObjCopy.Spec.Replicas = rsmProto.Spec.Replicas - c.updateUpdateStrategy(rsmObjCopy, rsmProto) - rsmObjCopy.Spec.Service = rsmProto.Spec.Service - rsmObjCopy.Spec.AlternativeServices = rsmProto.Spec.AlternativeServices - rsmObjCopy.Spec.Roles = rsmProto.Spec.Roles - rsmObjCopy.Spec.RoleProbe = rsmProto.Spec.RoleProbe - rsmObjCopy.Spec.MembershipReconfiguration = rsmProto.Spec.MembershipReconfiguration - rsmObjCopy.Spec.MemberUpdateStrategy = rsmProto.Spec.MemberUpdateStrategy - rsmObjCopy.Spec.Credential = rsmProto.Spec.Credential - - resolvePodSpecDefaultFields(rsmObj.Spec.Template.Spec, &rsmObjCopy.Spec.Template.Spec) - - delayUpdatePodSpecSystemFields(rsmObj.Spec.Template.Spec, &rsmObjCopy.Spec.Template.Spec) - isTemplateUpdated := !reflect.DeepEqual(&rsmObj.Spec, &rsmObjCopy.Spec) - if isTemplateUpdated { - updatePodSpecSystemFields(&rsmObjCopy.Spec.Template.Spec) - } - if isTemplateUpdated || !reflect.DeepEqual(rsmObj.Annotations, rsmObjCopy.Annotations) { - c.WorkloadVertex.Obj = rsmObjCopy - c.WorkloadVertex.Action = ictrltypes.ActionPtr(ictrltypes.UPDATE) - return true - } - return false -} - -func (c *rsmComponentBase) updateUpdateStrategy(rsmObj, rsmProto *workloads.ReplicatedStateMachine) { - var objMaxUnavailable *intstr.IntOrString - if rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { - objMaxUnavailable = rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable - } - rsmObj.Spec.UpdateStrategy = rsmProto.Spec.UpdateStrategy - if objMaxUnavailable == nil && rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { - // HACK: This field is alpha-level (since v1.24) and is only honored by servers that enable the - // MaxUnavailableStatefulSet feature. 
- // When we get a nil MaxUnavailable from k8s, we consider that the field is not supported by the server, - // and set the MaxUnavailable as nil explicitly to avoid the workload been updated unexpectedly. - // Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods - rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = nil - } -} - -func (c *rsmComponentBase) updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, rsmObj *workloads.ReplicatedStateMachine) error { - // PVCs which have been added to the dag because of volume expansion. - pvcNameSet := sets.New[string]() - for _, v := range ictrltypes.FindAll[*corev1.PersistentVolumeClaim](c.Dag) { - pvcNameSet.Insert(v.(*ictrltypes.LifecycleVertex).Obj.GetName()) - } - - for _, vct := range c.Component.VolumeClaimTemplates { - pvcs, err := c.getRunningVolumes(reqCtx, cli, vct.Name, rsmObj) - if err != nil { - return err - } - for _, pvc := range pvcs { - if pvcNameSet.Has(pvc.Name) { - continue - } - c.NoopResource(pvc, c.WorkloadVertex) - } - } - return nil -} - -func (c *rsmComponentBase) getRunningVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, vctName string, - rsmObj *workloads.ReplicatedStateMachine) ([]*corev1.PersistentVolumeClaim, error) { - pvcs, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PersistentVolumeClaimSignature, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - matchedPVCs := make([]*corev1.PersistentVolumeClaim, 0) - prefix := fmt.Sprintf("%s-%s", vctName, rsmObj.Name) - for _, pvc := range pvcs { - if strings.HasPrefix(pvc.Name, prefix) { - matchedPVCs = append(matchedPVCs, pvc) - } - } - return matchedPVCs, nil -} diff --git a/controllers/apps/components/base_stateful_legacy.go b/controllers/apps/components/base_stateful_legacy.go deleted file mode 100644 index 973778b44ff..00000000000 --- 
a/controllers/apps/components/base_stateful_legacy.go +++ /dev/null @@ -1,752 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" - viper "github.com/apecloud/kubeblocks/internal/viperx" -) - -// statefulComponentBase as a base class for single stateful-set based component (stateful & replication & consensus). 
-type statefulComponentBase struct { - componentBase - // runningWorkload can be nil, and the replicas of workload can be nil (zero) - runningWorkload *appsv1.StatefulSet -} - -func (c *statefulComponentBase) init(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder, load bool) error { - var err error - if builder != nil { - if err = builder.BuildEnv(). - BuildWorkload(). - BuildPDB(). - BuildHeadlessService(). - BuildConfig(). - BuildTLSVolume(). - BuildVolumeMount(). - BuildService(). - BuildTLSCert(). - Complete(); err != nil { - return err - } - } - if load { - c.runningWorkload, err = c.loadRunningWorkload(reqCtx, cli) - if err != nil { - return err - } - } - return nil -} - -func (c *statefulComponentBase) loadRunningWorkload(reqCtx intctrlutil.RequestCtx, cli client.Client) (*appsv1.StatefulSet, error) { - stsList, err := listStsOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return nil, err - } - cnt := len(stsList) - if cnt == 1 { - return stsList[0], nil - } - if cnt == 0 { - return nil, nil - } else { - return nil, fmt.Errorf("more than one workloads found for the component, cluster: %s, component: %s, cnt: %d", - c.GetClusterName(), c.GetName(), cnt) - } -} - -func (c *statefulComponentBase) GetBuiltObjects(builder componentWorkloadBuilder) ([]client.Object, error) { - dag := c.Dag - defer func() { - c.Dag = dag - }() - - c.Dag = graph.NewDAG() - if err := c.init(intctrlutil.RequestCtx{}, nil, builder, false); err != nil { - return nil, err - } - - objs := make([]client.Object, 0) - for _, v := range c.Dag.Vertices() { - if vv, ok := v.(*ictrltypes.LifecycleVertex); ok { - objs = append(objs, vv.Obj) - } - } - return objs, nil -} - -func (c *statefulComponentBase) Create(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, false); err != nil { - return err - } - - if err := 
c.ValidateObjectsAction(); err != nil { - return err - } - - c.SetStatusPhase(appsv1alpha1.CreatingClusterCompPhase, nil, "Create a new component") - - return nil -} - -func (c *statefulComponentBase) Delete(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - // TODO(impl): delete component owned resources - return nil -} - -func (c *statefulComponentBase) Update(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, true); err != nil { - return err - } - - if c.runningWorkload != nil { - if err := c.Restart(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].volumeClaimTemplates[*].spec.resources.requests[corev1.ResourceStorage] - if err := c.ExpandVolume(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].replicas - if err := c.HorizontalScale(reqCtx, cli); err != nil { - return err - } - } - - if err := c.updateUnderlyingResources(reqCtx, cli, c.runningWorkload); err != nil { - return err - } - - return c.ResolveObjectsAction(reqCtx, cli) -} - -func (c *statefulComponentBase) Status(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { - if err := c.init(reqCtx, cli, builder, true); err != nil { - return err - } - if c.runningWorkload == nil { - return nil - } - - statusTxn := &statusReconciliationTxn{} - - if err := c.statusExpandVolume(reqCtx, cli, statusTxn); err != nil { - return err - } - - if err := c.horizontalScale(reqCtx, cli, statusTxn); err != nil { - return err - } - - if vertexes, err := c.ComponentSet.HandleRoleChange(reqCtx.Ctx, c.runningWorkload); err != nil { - return err - } else { - for _, v := range vertexes { - c.Dag.AddVertex(v) - } - } - - // TODO(impl): restart pod if needed, move it to @Update and restart pod directly. 
- if vertexes, err := c.ComponentSet.HandleRestart(reqCtx.Ctx, c.runningWorkload); err != nil { - return err - } else { - for _, v := range vertexes { - c.Dag.AddVertex(v) - } - } - - var delayedRequeueError error - if err := c.StatusWorkload(reqCtx, cli, c.runningWorkload, statusTxn); err != nil { - if !intctrlutil.IsDelayedRequeueError(err) { - return err - } - delayedRequeueError = err - } - - if err := statusTxn.commit(); err != nil { - return err - } - - c.updateWorkload(c.runningWorkload) - - // update component info to pods' annotations - if err := updateComponentInfoToPods(reqCtx.Ctx, cli, c.Cluster, c.Component, c.Dag); err != nil { - return err - } - - // patch the current componentSpec workload's custom labels - if err := updateCustomLabelToPods(reqCtx.Ctx, cli, c.Cluster, c.Component, c.Dag); err != nil { - reqCtx.Event(c.Cluster, corev1.EventTypeWarning, "Component Workload Controller PatchWorkloadCustomLabelFailed", err.Error()) - return err - } - - return delayedRequeueError -} - -func (c *statefulComponentBase) Restart(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return restartPod(&c.runningWorkload.Spec.Template) -} - -func (c *statefulComponentBase) ExpandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { - var proto *corev1.PersistentVolumeClaimTemplate - for _, v := range c.Component.VolumeClaimTemplates { - if v.Name == vct.Name { - proto = &v - break - } - } - // REVIEW: seems we can remove a volume claim from templates at runtime, without any changes and warning messages? 
- if proto == nil { - continue - } - - if err := c.expandVolumes(reqCtx, cli, vct.Name, proto); err != nil { - return err - } - } - return nil -} - -func (c *statefulComponentBase) expandVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, - vctName string, proto *corev1.PersistentVolumeClaimTemplate) error { - pvcNotFound := false - for i := *c.runningWorkload.Spec.Replicas - 1; i >= 0; i-- { - pvc := &corev1.PersistentVolumeClaim{} - pvcKey := types.NamespacedName{ - Namespace: c.GetNamespace(), - Name: fmt.Sprintf("%s-%s-%d", vctName, c.runningWorkload.Name, i), - } - if err := cli.Get(reqCtx.Ctx, pvcKey, pvc); err != nil { - if apierrors.IsNotFound(err) { - pvcNotFound = true - } else { - return err - } - } - if err := c.updatePVCSize(reqCtx, cli, pvcKey, pvc, pvcNotFound, proto); err != nil { - return err - } - } - return nil -} - -func (c *statefulComponentBase) updatePVCSize(reqCtx intctrlutil.RequestCtx, cli client.Client, pvcKey types.NamespacedName, - pvc *corev1.PersistentVolumeClaim, pvcNotFound bool, vctProto *corev1.PersistentVolumeClaimTemplate) error { - // reference: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#recovering-from-failure-when-expanding-volumes - // 1. Mark the PersistentVolume(PV) that is bound to the PersistentVolumeClaim(PVC) with Retain reclaim policy. - // 2. Delete the PVC. Since PV has Retain reclaim policy - we will not lose any data when we recreate the PVC. - // 3. Delete the claimRef entry from PV specs, so as new PVC can bind to it. This should make the PV Available. - // 4. Re-create the PVC with smaller size than PV and set volumeName field of the PVC to the name of the PV. This should bind new PVC to existing PV. - // 5. Don't forget to restore the reclaim policy of the PV. 
- newPVC := pvc.DeepCopy() - if pvcNotFound { - newPVC.Name = pvcKey.Name - newPVC.Namespace = pvcKey.Namespace - newPVC.SetLabels(vctProto.Labels) - newPVC.Spec = vctProto.Spec - ml := client.MatchingLabels{ - constant.PVCNameLabelKey: pvcKey.Name, - } - pvList := corev1.PersistentVolumeList{} - if err := cli.List(reqCtx.Ctx, &pvList, ml); err != nil { - return err - } - for _, pv := range pvList.Items { - // find pv referenced this pvc - if pv.Spec.ClaimRef == nil { - continue - } - if pv.Spec.ClaimRef.Name == pvcKey.Name { - newPVC.Spec.VolumeName = pv.Name - break - } - } - } else { - newPVC.Spec.Resources.Requests[corev1.ResourceStorage] = vctProto.Spec.Resources.Requests[corev1.ResourceStorage] - // delete annotation to make it re-bind - delete(newPVC.Annotations, "pv.kubernetes.io/bind-completed") - } - - pvNotFound := false - - // step 1: update pv to retain - pv := &corev1.PersistentVolume{} - pvKey := types.NamespacedName{ - Namespace: pvcKey.Namespace, - Name: newPVC.Spec.VolumeName, - } - if err := cli.Get(reqCtx.Ctx, pvKey, pv); err != nil { - if apierrors.IsNotFound(err) { - pvNotFound = true - } else { - return err - } - } - - type pvcRecreateStep int - const ( - pvPolicyRetainStep pvcRecreateStep = iota - deletePVCStep - removePVClaimRefStep - createPVCStep - pvRestorePolicyStep - ) - - addStepMap := map[pvcRecreateStep]func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex{ - pvPolicyRetainStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 1: update pv to retain - retainPV := pv.DeepCopy() - if retainPV.Labels == nil { - retainPV.Labels = make(map[string]string) - } - // add label to pv, in case pvc get deleted, and we can't find pv - retainPV.Labels[constant.PVCNameLabelKey] = pvcKey.Name - if retainPV.Annotations == nil { - retainPV.Annotations = make(map[string]string) - } - retainPV.Annotations[constant.PVLastClaimPolicyAnnotationKey] = 
string(pv.Spec.PersistentVolumeReclaimPolicy) - retainPV.Spec.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimRetain - return c.PatchResource(retainPV, pv, fromVertex) - }, - deletePVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 2: delete pvc, this will not delete pv because policy is 'retain' - removeFinalizerPVC := pvc.DeepCopy() - removeFinalizerPVC.SetFinalizers([]string{}) - removeFinalizerPVCVertex := c.PatchResource(removeFinalizerPVC, pvc, fromVertex) - return c.DeleteResource(pvc, removeFinalizerPVCVertex) - }, - removePVClaimRefStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 3: remove claimRef in pv - removeClaimRefPV := pv.DeepCopy() - if removeClaimRefPV.Spec.ClaimRef != nil { - removeClaimRefPV.Spec.ClaimRef.UID = "" - removeClaimRefPV.Spec.ClaimRef.ResourceVersion = "" - } - return c.PatchResource(removeClaimRefPV, pv, fromVertex) - }, - createPVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 4: create new pvc - newPVC.SetResourceVersion("") - return c.CreateResource(newPVC, fromVertex) - }, - pvRestorePolicyStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { - // step 5: restore to previous pv policy - restorePV := pv.DeepCopy() - policy := corev1.PersistentVolumeReclaimPolicy(restorePV.Annotations[constant.PVLastClaimPolicyAnnotationKey]) - if len(policy) == 0 { - policy = corev1.PersistentVolumeReclaimDelete - } - restorePV.Spec.PersistentVolumeReclaimPolicy = policy - return c.PatchResource(restorePV, pv, fromVertex) - }, - } - - updatePVCByRecreateFromStep := func(fromStep pvcRecreateStep) { - lastVertex := c.WorkloadVertex - for step := pvRestorePolicyStep; step >= fromStep && step >= pvPolicyRetainStep; step-- { - lastVertex = addStepMap[step](lastVertex, step) - } - } - - 
targetQuantity := vctProto.Spec.Resources.Requests[corev1.ResourceStorage] - if pvcNotFound && !pvNotFound { - // this could happen if create pvc step failed when recreating pvc - updatePVCByRecreateFromStep(removePVClaimRefStep) - return nil - } - if pvcNotFound && pvNotFound { - // if both pvc and pv not found, do nothing - return nil - } - if reflect.DeepEqual(pvc.Spec.Resources, newPVC.Spec.Resources) && pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain { - // this could happen if create pvc succeeded but last step failed - updatePVCByRecreateFromStep(pvRestorePolicyStep) - return nil - } - if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; !viper.GetBool(constant.CfgRecoverVolumeExpansionFailure) && - pvcQuantity.Cmp(targetQuantity) == 1 && // check if it's compressing volume - targetQuantity.Cmp(*pvc.Status.Capacity.Storage()) >= 0 { // check if target size is greater than or equal to actual size - // this branch means we can update pvc size by recreate it - updatePVCByRecreateFromStep(pvPolicyRetainStep) - return nil - } - if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; pvcQuantity.Cmp(vctProto.Spec.Resources.Requests[corev1.ResourceStorage]) != 0 { - // use pvc's update without anything extra - c.UpdateResource(newPVC, c.WorkloadVertex) - return nil - } - // all the else means no need to update - - return nil -} - -func (c *statefulComponentBase) statusExpandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client, txn *statusReconciliationTxn) error { - for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { - running, failed, err := c.hasVolumeExpansionRunning(reqCtx, cli, vct.Name) - if err != nil { - return err - } - if failed { - txn.propose(appsv1alpha1.AbnormalClusterCompPhase, func() { - c.SetStatusPhase(appsv1alpha1.AbnormalClusterCompPhase, nil, "Volume Expansion failed") - }) - return nil - } - if running { - txn.propose(appsv1alpha1.UpdatingClusterCompPhase, func() { 
- c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Volume Expansion failed") - }) - return nil - } - } - return nil -} - -func (c *statefulComponentBase) hasVolumeExpansionRunning(reqCtx intctrlutil.RequestCtx, cli client.Client, vctName string) (bool, bool, error) { - var ( - running bool - failed bool - ) - volumes, err := c.getRunningVolumes(reqCtx, cli, vctName, c.runningWorkload) - if err != nil { - return false, false, err - } - for _, v := range volumes { - if v.Status.Capacity == nil || v.Status.Capacity.Storage().Cmp(v.Spec.Resources.Requests[corev1.ResourceStorage]) >= 0 { - continue - } - running = true - // TODO: how to check the expansion failed? - } - return running, failed, nil -} - -func (c *statefulComponentBase) HorizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.horizontalScale(reqCtx, cli, nil) -} - -func (c *statefulComponentBase) horizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client, txn *statusReconciliationTxn) error { - sts := c.runningWorkload - if sts.Status.ReadyReplicas == c.Component.Replicas { - return nil - } - ret := c.horizontalScaling(sts) - if ret == 0 { - if err := c.postScaleIn(reqCtx, cli, txn); err != nil { - return err - } - if err := c.postScaleOut(reqCtx, cli, sts); err != nil { - return err - } - return nil - } - if ret < 0 { - if err := c.scaleIn(reqCtx, cli, sts); err != nil { - return err - } - } else { - if err := c.scaleOut(reqCtx, cli, sts); err != nil { - return err - } - } - - if err := c.updatePodReplicaLabel4Scaling(reqCtx, cli, c.Component.Replicas); err != nil { - return err - } - - // update KB___ env needed by pod to obtain hostname. 
- c.updatePodEnvConfig() - - reqCtx.Recorder.Eventf(c.Cluster, - corev1.EventTypeNormal, - "HorizontalScale", - "start horizontal scale component %s of cluster %s from %d to %d", - c.GetName(), c.GetClusterName(), int(c.Component.Replicas)-ret, c.Component.Replicas) - - return nil -} - -// < 0 for scale in, > 0 for scale out, and == 0 for nothing -func (c *statefulComponentBase) horizontalScaling(stsObj *appsv1.StatefulSet) int { - return int(c.Component.Replicas - *stsObj.Spec.Replicas) -} - -func (c *statefulComponentBase) updatePodEnvConfig() { - for _, v := range ictrltypes.FindAll[*corev1.ConfigMap](c.Dag) { - node := v.(*ictrltypes.LifecycleVertex) - // TODO: need a way to reference the env config. - envConfigName := fmt.Sprintf("%s-%s-env", c.GetClusterName(), c.GetName()) - if node.Obj.GetName() == envConfigName { - node.Action = ictrltypes.ActionUpdatePtr() - } - } -} - -func (c *statefulComponentBase) updatePodReplicaLabel4Scaling(reqCtx intctrlutil.RequestCtx, cli client.Client, replicas int32) error { - pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return err - } - for _, pod := range pods { - obj := pod.DeepCopy() - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[constant.ComponentReplicasAnnotationKey] = strconv.Itoa(int(replicas)) - c.UpdateResource(obj, c.WorkloadVertex) - } - return nil -} - -func (c *statefulComponentBase) scaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - // if scale in to 0, do not delete pvcs - if c.Component.Replicas == 0 { - return nil - } - for i := c.Component.Replicas; i < *stsObj.Spec.Replicas; i++ { - for _, vct := range stsObj.Spec.VolumeClaimTemplates { - pvcKey := types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: fmt.Sprintf("%s-%s-%d", vct.Name, stsObj.Name, i), - } - pvc := corev1.PersistentVolumeClaim{} - if err := cli.Get(reqCtx.Ctx, pvcKey, 
&pvc); err != nil { - return err - } - // Since there are no order guarantee between updating STS and deleting PVCs, if there is any error occurred - // after updating STS and before deleting PVCs, the PVCs intended to scale-in will be leaked. - // For simplicity, the updating dependency is added between them to guarantee that the PVCs to scale-in - // will be deleted or the scaling-in operation will be failed. - c.DeleteResource(&pvc, c.WorkloadVertex) - } - } - return nil -} - -func (c *statefulComponentBase) postScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, txn *statusReconciliationTxn) error { - return nil -} - -func (c *statefulComponentBase) scaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - var ( - backupKey = types.NamespacedName{ - Namespace: stsObj.Namespace, - Name: stsObj.Name + "-scaling", - } - ) - - // sts's replicas=0 means it's starting not scaling, skip all the scaling work. - if *stsObj.Spec.Replicas == 0 { - return nil - } - - c.WorkloadVertex.Immutable = true - stsProto := c.WorkloadVertex.Obj.(*appsv1.StatefulSet) - d, err := newDataClone(reqCtx, cli, c.Cluster, c.Component, stsObj, stsProto, backupKey) - if err != nil { - return err - } - var succeed bool - if d == nil { - succeed = true - } else { - succeed, err = d.succeed() - if err != nil { - return err - } - } - if succeed { - // pvcs are ready, stateful_set.replicas should be updated - c.WorkloadVertex.Immutable = false - return c.postScaleOut(reqCtx, cli, stsObj) - } else { - c.WorkloadVertex.Immutable = true - // update objs will trigger cluster reconcile, no need to requeue error - objs, err := d.cloneData(d) - if err != nil { - return err - } - for _, obj := range objs { - c.CreateResource(obj, nil) - } - return nil - } -} - -func (c *statefulComponentBase) postScaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - var ( - snapshotKey = types.NamespacedName{ - Namespace: 
stsObj.Namespace, - Name: stsObj.Name + "-scaling", - } - ) - - d, err := newDataClone(reqCtx, cli, c.Cluster, c.Component, stsObj, stsObj, snapshotKey) - if err != nil { - return err - } - if d != nil { - // clean backup resources. - // there will not be any backup resources other than scale out. - tmpObjs, err := d.clearTmpResources() - if err != nil { - return err - } - for _, obj := range tmpObjs { - c.DeleteResource(obj, nil) - } - } - - return nil -} - -func (c *statefulComponentBase) updateUnderlyingResources(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - if stsObj == nil { - c.createWorkload() - c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Component workload created") - } else { - if c.updateWorkload(stsObj) { - c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Component workload updated") - } - // to work around that the scaled PVC will be deleted at object action. - if err := c.updateVolumes(reqCtx, cli, stsObj); err != nil { - return err - } - } - if err := c.UpdatePDB(reqCtx, cli); err != nil { - return err - } - if err := c.UpdateService(reqCtx, cli); err != nil { - return err - } - // update KB___ env needed by pod to obtain hostname. - c.updatePodEnvConfig() - return nil -} - -func (c *statefulComponentBase) createWorkload() { - stsProto := c.WorkloadVertex.Obj.(*appsv1.StatefulSet) - c.WorkloadVertex.Obj = stsProto - c.WorkloadVertex.Action = ictrltypes.ActionCreatePtr() -} - -func (c *statefulComponentBase) updateWorkload(stsObj *appsv1.StatefulSet) bool { - stsObjCopy := stsObj.DeepCopy() - stsProto := c.WorkloadVertex.Obj.(*appsv1.StatefulSet) - - // keep the original template annotations. - // if annotations exist and are replaced, the statefulSet will be updated. 
- mergeAnnotations(stsObjCopy.Spec.Template.Annotations, &stsProto.Spec.Template.Annotations) - buildWorkLoadAnnotations(stsObjCopy, c.Cluster) - stsObjCopy.Spec.Template = stsProto.Spec.Template - stsObjCopy.Spec.Replicas = stsProto.Spec.Replicas - c.updateUpdateStrategy(stsObjCopy, stsProto) - - resolvePodSpecDefaultFields(stsObj.Spec.Template.Spec, &stsObjCopy.Spec.Template.Spec) - - delayUpdatePodSpecSystemFields(stsObj.Spec.Template.Spec, &stsObjCopy.Spec.Template.Spec) - - if !reflect.DeepEqual(&stsObj.Spec, &stsObjCopy.Spec) { - updatePodSpecSystemFields(&stsObjCopy.Spec.Template.Spec) - c.WorkloadVertex.Obj = stsObjCopy - c.WorkloadVertex.Action = ictrltypes.ActionPtr(ictrltypes.UPDATE) - return true - } - return false -} - -func (c *statefulComponentBase) updateUpdateStrategy(stsObj, stsProto *appsv1.StatefulSet) { - var objMaxUnavailable *intstr.IntOrString - if stsObj.Spec.UpdateStrategy.RollingUpdate != nil { - objMaxUnavailable = stsObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable - } - stsObj.Spec.UpdateStrategy = stsProto.Spec.UpdateStrategy - if objMaxUnavailable == nil && stsObj.Spec.UpdateStrategy.RollingUpdate != nil { - // HACK: This field is alpha-level (since v1.24) and is only honored by servers that enable the - // MaxUnavailableStatefulSet feature. - // When we get a nil MaxUnavailable from k8s, we consider that the field is not supported by the server, - // and set the MaxUnavailable as nil explicitly to avoid the workload been updated unexpectedly. - // Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods - stsObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = nil - } -} - -func (c *statefulComponentBase) updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { - // PVCs which have been added to the dag because of volume expansion. 
- pvcNameSet := sets.New[string]() - for _, v := range ictrltypes.FindAll[*corev1.PersistentVolumeClaim](c.Dag) { - pvcNameSet.Insert(v.(*ictrltypes.LifecycleVertex).Obj.GetName()) - } - - for _, vct := range c.Component.VolumeClaimTemplates { - pvcs, err := c.getRunningVolumes(reqCtx, cli, vct.Name, stsObj) - if err != nil { - return err - } - for _, pvc := range pvcs { - if pvcNameSet.Has(pvc.Name) { - continue - } - c.NoopResource(pvc, c.WorkloadVertex) - } - } - return nil -} - -func (c *statefulComponentBase) getRunningVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, vctName string, - stsObj *appsv1.StatefulSet) ([]*corev1.PersistentVolumeClaim, error) { - pvcs, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PersistentVolumeClaimSignature, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - matchedPVCs := make([]*corev1.PersistentVolumeClaim, 0) - prefix := fmt.Sprintf("%s-%s", vctName, stsObj.Name) - for _, pvc := range pvcs { - if strings.HasPrefix(pvc.Name, prefix) { - matchedPVCs = append(matchedPVCs, pvc) - } - } - return matchedPVCs, nil -} diff --git a/controllers/apps/components/component.go b/controllers/apps/components/component.go index 8ecd98a0e70..6b6982e773f 100644 --- a/controllers/apps/components/component.go +++ b/controllers/apps/components/component.go @@ -22,116 +22,1498 @@ package components import ( "context" "fmt" + "reflect" + "strconv" + "strings" "time" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" 
"k8s.io/kubectl/pkg/util/podutils" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/class" + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" + "github.com/apecloud/kubeblocks/internal/configuration/util" "github.com/apecloud/kubeblocks/internal/constant" - types2 "github.com/apecloud/kubeblocks/internal/controller/client" "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/graph" - "github.com/apecloud/kubeblocks/internal/controller/plan" + rsmcore "github.com/apecloud/kubeblocks/internal/controller/rsm" + ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/generics" + viper "github.com/apecloud/kubeblocks/internal/viperx" + lorry "github.com/apecloud/kubeblocks/lorry/client" ) -// PodIsAvailable checks whether a pod is available with respect to the workload type. -// Deprecated: provide for ops request using, remove this interface later. -func PodIsAvailable(workloadType appsv1alpha1.WorkloadType, pod *corev1.Pod, minReadySeconds int32) bool { - if pod == nil { - return false +const ( + // componentPhaseTransition the event reason indicates that the component transits to a new phase. + componentPhaseTransition = "ComponentPhaseTransition" + + // podContainerFailedTimeout the timeout for container of pod failures, the component phase will be set to Failed/Abnormal after this time. + podContainerFailedTimeout = 10 * time.Second + + // podScheduledFailedTimeout timeout for scheduling failure. 
+ podScheduledFailedTimeout = 30 * time.Second +) + +// rsmComponent as a base class for single rsm based component (stateful & replication & consensus). +type rsmComponent struct { + Client client.Client + Recorder record.EventRecorder + Cluster *appsv1alpha1.Cluster + clusterVersion *appsv1alpha1.ClusterVersion // building config needs the cluster version + component *component.SynthesizedComponent // built synthesized component, replace it with component workload proto + dag *graph.DAG + workloadVertex *ictrltypes.LifecycleVertex // DAG vertex of main workload object + // runningWorkload can be nil, and the replicas of workload can be nil (zero) + runningWorkload *workloads.ReplicatedStateMachine +} + +var _ Component = &rsmComponent{} + +func newRSMComponent(cli client.Client, + recorder record.EventRecorder, + cluster *appsv1alpha1.Cluster, + clusterVersion *appsv1alpha1.ClusterVersion, + synthesizedComponent *component.SynthesizedComponent, + dag *graph.DAG) Component { + comp := &rsmComponent{ + Client: cli, + Recorder: recorder, + Cluster: cluster, + clusterVersion: clusterVersion, + component: synthesizedComponent, + dag: dag, + workloadVertex: nil, + } + return comp +} + +func (c *rsmComponent) GetName() string { + return c.component.Name +} + +func (c *rsmComponent) GetNamespace() string { + return c.Cluster.Namespace +} + +func (c *rsmComponent) GetClusterName() string { + return c.Cluster.Name +} + +func (c *rsmComponent) GetCluster() *appsv1alpha1.Cluster { + return c.Cluster +} + +func (c *rsmComponent) GetClusterVersion() *appsv1alpha1.ClusterVersion { + return c.clusterVersion +} + +func (c *rsmComponent) GetSynthesizedComponent() *component.SynthesizedComponent { + return c.component +} + +func (c *rsmComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + return c.create(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) +} + +func (c *rsmComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) 
error { + return c.update(reqCtx, cli, c.newBuilder(reqCtx, cli, nil)) +} + +func (c *rsmComponent) Delete(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + // TODO(impl): delete component owned resources + return nil +} + +func (c *rsmComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + return c.status(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr())) +} + +func (c *rsmComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, + action *ictrltypes.LifecycleAction) componentWorkloadBuilder { + return &rsmComponentWorkloadBuilder{ + reqCtx: reqCtx, + client: cli, + comp: c, + defaultAction: action, + error: nil, + envConfig: nil, + workload: nil, + } +} + +func (c *rsmComponent) setWorkload(obj client.Object, action *ictrltypes.LifecycleAction, parent *ictrltypes.LifecycleVertex) { + c.workloadVertex = c.addResource(obj, action, parent) +} + +func (c *rsmComponent) addResource(obj client.Object, action *ictrltypes.LifecycleAction, + parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + if obj == nil { + panic("try to add nil object") + } + vertex := &ictrltypes.LifecycleVertex{ + Obj: obj, + Action: action, + } + c.dag.AddVertex(vertex) + + if parent != nil { + c.dag.Connect(parent, vertex) + } + return vertex +} + +func (c *rsmComponent) init(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder, load bool) error { + var err error + if builder != nil { + if err = builder.BuildEnv(). + BuildWorkload(). + BuildPDB(). + BuildConfig(). + BuildTLSVolume(). + BuildVolumeMount(). + BuildTLSCert(). 
+ Complete(); err != nil { + return err + } + } + if load { + c.runningWorkload, err = c.loadRunningWorkload(reqCtx, cli) + if err != nil { + return err + } } - switch workloadType { - case appsv1alpha1.Consensus, appsv1alpha1.Replication: - return intctrlutil.PodIsReadyWithLabel(*pod) - case appsv1alpha1.Stateful, appsv1alpha1.Stateless: - return podutils.IsPodAvailable(pod, minReadySeconds, metav1.Time{Time: time.Now()}) + return nil +} + +func (c *rsmComponent) loadRunningWorkload(reqCtx intctrlutil.RequestCtx, cli client.Client) (*workloads.ReplicatedStateMachine, error) { + rsmList, err := listRSMOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.getMatchingLabels()) + if err != nil { + return nil, err + } + cnt := len(rsmList) + switch { + case cnt == 0: + return nil, nil + case cnt == 1: + return rsmList[0], nil default: - panic("unknown workload type") + return nil, fmt.Errorf("more than one workloads found for the component, cluster: %s, component: %s, cnt: %d", + c.GetClusterName(), c.GetName(), cnt) } } -func NewComponent(reqCtx intctrlutil.RequestCtx, - cli client.Client, - definition *appsv1alpha1.ClusterDefinition, - version *appsv1alpha1.ClusterVersion, - cluster *appsv1alpha1.Cluster, - compName string, - dag *graph.DAG) (Component, error) { - var compDef *appsv1alpha1.ClusterComponentDefinition - var compVer *appsv1alpha1.ClusterComponentVersion - compSpec := cluster.Spec.GetComponentByName(compName) - if compSpec != nil { - compDef = definition.GetComponentDefByName(compSpec.ComponentDefRef) - if compDef == nil { - return nil, fmt.Errorf("referenced component definition does not exist, cluster: %s, component: %s, component definition ref:%s", - cluster.Name, compSpec.Name, compSpec.ComponentDefRef) - } - if version != nil { - compVer = version.Spec.GetDefNameMappingComponents()[compSpec.ComponentDefRef] +func (c *rsmComponent) getMatchingLabels() client.MatchingLabels { + return client.MatchingLabels{ + constant.AppManagedByLabelKey: 
constant.AppName, + constant.AppInstanceLabelKey: c.GetClusterName(), + constant.KBAppComponentLabelKey: c.GetName(), + } +} + +func (c *rsmComponent) create(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { + if err := c.init(reqCtx, cli, builder, false); err != nil { + return err + } + + if err := c.validateObjectsAction(); err != nil { + return err + } + + return nil +} + +func (c *rsmComponent) update(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { + if err := c.init(reqCtx, cli, builder, true); err != nil { + return err + } + + if c.runningWorkload != nil { + if err := c.restart(reqCtx, cli); err != nil { + return err + } + + // cluster.spec.componentSpecs[*].volumeClaimTemplates[*].spec.resources.requests[corev1.ResourceStorage] + if err := c.expandVolume(reqCtx, cli); err != nil { + return err + } + + // cluster.spec.componentSpecs[*].replicas + if err := c.horizontalScale(reqCtx, cli); err != nil { + return err + } + } + + if err := c.updateUnderlyingResources(reqCtx, cli, c.runningWorkload); err != nil { + return err + } + + return c.resolveObjectsAction(reqCtx, cli) +} + +func (c *rsmComponent) status(reqCtx intctrlutil.RequestCtx, cli client.Client, builder componentWorkloadBuilder) error { + if err := c.init(reqCtx, cli, builder, true); err != nil { + return err + } + if c.runningWorkload == nil { + return nil + } + + isDeleting := func() bool { + return !c.runningWorkload.DeletionTimestamp.IsZero() + }() + isZeroReplica := func() bool { + return (c.runningWorkload.Spec.Replicas == nil || *c.runningWorkload.Spec.Replicas == 0) && c.component.Replicas == 0 + }() + pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.getMatchingLabels()) + if err != nil { + return err + } + hasComponentPod := func() bool { + return len(pods) > 0 + }() + isRunning, err := c.isRunning(reqCtx.Ctx, cli, c.runningWorkload) + if err != nil { + return err + } + 
isAllConfigSynced := c.isAllConfigSynced(reqCtx, cli) + hasFailedPod, messages, err := c.hasFailedPod(reqCtx, cli, pods) + if err != nil { + return err + } + isScaleOutFailed, err := c.isScaleOutFailed(reqCtx, cli) + if err != nil { + return err + } + hasRunningVolumeExpansion, hasFailedVolumeExpansion, err := c.hasVolumeExpansionRunning(reqCtx, cli) + if err != nil { + return err + } + hasFailure := func() bool { + return hasFailedPod || isScaleOutFailed || hasFailedVolumeExpansion + }() + isComponentAvailable, err := c.isAvailable(reqCtx, cli, pods) + if err != nil { + return err + } + isInCreatingPhase := func() bool { + phase := c.getComponentStatus().Phase + return phase == "" || phase == appsv1alpha1.CreatingClusterCompPhase + }() + + updatePodsReady := func(ready bool) { + _ = c.updateStatus("", func(status *appsv1alpha1.ClusterComponentStatus) error { + // if ready flag not changed, don't update the ready time + if status.PodsReady != nil && *status.PodsReady == ready { + return nil + } + status.PodsReady = &ready + if ready { + now := metav1.Now() + status.PodsReadyTime = &now + } + return nil + }) + } + + podsReady := false + switch { + case isDeleting: + c.setStatusPhase(appsv1alpha1.DeletingClusterCompPhase, nil, "component is Deleting") + case isZeroReplica && hasComponentPod: + c.setStatusPhase(appsv1alpha1.StoppingClusterCompPhase, nil, "component is Stopping") + podsReady = true + case isZeroReplica: + c.setStatusPhase(appsv1alpha1.StoppedClusterCompPhase, nil, "component is Stopped") + podsReady = true + case isRunning && isAllConfigSynced && !hasRunningVolumeExpansion: + c.setStatusPhase(appsv1alpha1.RunningClusterCompPhase, nil, "component is Running") + podsReady = true + case !hasFailure && isInCreatingPhase: + c.setStatusPhase(appsv1alpha1.CreatingClusterCompPhase, nil, "Create a new component") + case !hasFailure: + c.setStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "component is Updating") + case !isComponentAvailable: + 
c.setStatusPhase(appsv1alpha1.FailedClusterCompPhase, messages, "component is Failed") + default: + c.setStatusPhase(appsv1alpha1.AbnormalClusterCompPhase, nil, "unknown") + } + updatePodsReady(podsReady) + + c.updateMembersStatus() + + // works should continue to be done after spec updated. + if err := c.horizontalScale(reqCtx, cli); err != nil { + return err + } + + c.updateWorkload(c.runningWorkload) + + // update component info to pods' annotations + if err := updateComponentInfoToPods(reqCtx.Ctx, cli, c.Cluster, c.component, c.dag); err != nil { + return err + } + + // patch the current componentSpec workload's custom labels + if err := updateCustomLabelToPods(reqCtx.Ctx, cli, c.Cluster, c.component, c.dag); err != nil { + reqCtx.Event(c.Cluster, corev1.EventTypeWarning, "component Workload Controller PatchWorkloadCustomLabelFailed", err.Error()) + return err + } + + return nil +} + +func (c *rsmComponent) createResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + return ictrltypes.LifecycleObjectCreate(c.dag, obj, parent) +} + +func (c *rsmComponent) deleteResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + return ictrltypes.LifecycleObjectDelete(c.dag, obj, parent) +} + +func (c *rsmComponent) updateResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + return ictrltypes.LifecycleObjectUpdate(c.dag, obj, parent) +} + +func (c *rsmComponent) patchResource(obj client.Object, objCopy client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + return ictrltypes.LifecycleObjectPatch(c.dag, obj, objCopy, parent) +} + +func (c *rsmComponent) noopResource(obj client.Object, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex { + return ictrltypes.LifecycleObjectNoop(c.dag, obj, parent) +} + +// validateObjectsAction validates the action of objects in dag has been determined. 
+func (c *rsmComponent) validateObjectsAction() error { + for _, v := range c.dag.Vertices() { + node, ok := v.(*ictrltypes.LifecycleVertex) + if !ok { + return fmt.Errorf("unexpected vertex type, cluster: %s, component: %s, vertex: %T", + c.GetClusterName(), c.GetName(), v) + } + if node.Obj == nil { + return fmt.Errorf("unexpected nil vertex object, cluster: %s, component: %s, vertex: %T", + c.GetClusterName(), c.GetName(), v) + } + if node.Action == nil { + return fmt.Errorf("unexpected nil vertex action, cluster: %s, component: %s, vertex: %T", + c.GetClusterName(), c.GetName(), v) + } + } + return nil +} + +// resolveObjectsAction resolves the action of objects in dag to guarantee that all object actions will be determined. +func (c *rsmComponent) resolveObjectsAction(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + snapshot, err := readCacheSnapshot(reqCtx, cli, c.GetCluster()) + if err != nil { + return err + } + for _, v := range c.dag.Vertices() { + node, ok := v.(*ictrltypes.LifecycleVertex) + if !ok { + return fmt.Errorf("unexpected vertex type, cluster: %s, component: %s, vertex: %T", + c.GetClusterName(), c.GetName(), v) + } + if node.Action == nil { + if action, err := resolveObjectAction(snapshot, node, cli.Scheme()); err != nil { + return err + } else { + node.Action = action + } + } + } + if c.GetCluster().IsStatusUpdating() { + for _, vertex := range c.dag.Vertices() { + v, _ := vertex.(*ictrltypes.LifecycleVertex) + // TODO(refactor): fix me, this is a workaround for h-scaling to update stateful set. + if _, ok := v.Obj.(*appsv1.StatefulSet); !ok { + v.Immutable = true + } + } + } + return c.validateObjectsAction() +} + +// setStatusPhase sets the cluster component phase and messages conditionally. 
+func (c *rsmComponent) setStatusPhase(phase appsv1alpha1.ClusterComponentPhase, + statusMessage appsv1alpha1.ComponentMessageMap, phaseTransitionMsg string) { + updatefn := func(status *appsv1alpha1.ClusterComponentStatus) error { + if status.Phase == phase { + return nil + } + status.Phase = phase + if status.Message == nil { + status.Message = statusMessage + } else { + for k, v := range statusMessage { + status.Message[k] = v + } + } + return nil + } + if err := c.updateStatus(phaseTransitionMsg, updatefn); err != nil { + panic(fmt.Sprintf("unexpected error occurred while updating component status: %s", err.Error())) + } +} + +// updateStatus updates the cluster component status by @updatefn, with additional message to explain the transition occurred. +func (c *rsmComponent) updateStatus(phaseTransitionMsg string, updatefn func(status *appsv1alpha1.ClusterComponentStatus) error) error { + if updatefn == nil { + return nil + } + + status := c.getComponentStatus() + phase := status.Phase + err := updatefn(&status) + if err != nil { + return err + } + c.Cluster.Status.Components[c.GetName()] = status + + if phase != status.Phase { + // TODO: logging the event + if c.Recorder != nil && phaseTransitionMsg != "" { + c.Recorder.Eventf(c.Cluster, corev1.EventTypeNormal, componentPhaseTransition, phaseTransitionMsg) + } + } + + return nil +} + +func (c *rsmComponent) isRunning(ctx context.Context, cli client.Client, obj client.Object) (bool, error) { + if obj == nil { + return false, nil + } + rsm, ok := obj.(*workloads.ReplicatedStateMachine) + if !ok { + return false, nil + } + if isLatestRevision, err := IsComponentPodsWithLatestRevision(ctx, cli, c.Cluster, rsm); err != nil { + return false, err + } else if !isLatestRevision { + return false, nil + } + + // whether rsm is ready + return rsmcore.IsRSMReady(rsm), nil +} + +// isAvailable tells whether the component is basically available, ether working well or in a fragile state: +// 1. 
at least one pod is available +// 2. with latest revision +// 3. and with leader role label set +func (c *rsmComponent) isAvailable(reqCtx intctrlutil.RequestCtx, cli client.Client, pods []*corev1.Pod) (bool, error) { + if isLatestRevision, err := IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, c.Cluster, c.runningWorkload); err != nil { + return false, err + } else if !isLatestRevision { + return false, nil + } + + shouldCheckLeader := func() bool { + return c.component.WorkloadType == appsv1alpha1.Consensus || c.component.WorkloadType == appsv1alpha1.Replication + }() + hasLeaderRoleLabel := func(pod *corev1.Pod) bool { + roleName, ok := pod.Labels[constant.RoleLabelKey] + if !ok { + return false + } + for _, replicaRole := range c.runningWorkload.Spec.Roles { + if roleName == replicaRole.Name && replicaRole.IsLeader { + return true + } + } + return false + } + for _, pod := range pods { + if !podutils.IsPodAvailable(pod, 0, metav1.Time{Time: time.Now()}) { + continue + } + if !shouldCheckLeader { + continue + } + if _, ok := pod.Labels[constant.RoleLabelKey]; ok { + continue + } + if hasLeaderRoleLabel(pod) { + return true, nil + } + } + return false, nil +} + +func (c *rsmComponent) hasFailedPod(reqCtx intctrlutil.RequestCtx, cli client.Client, pods []*corev1.Pod) (bool, appsv1alpha1.ComponentMessageMap, error) { + if isLatestRevision, err := IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, c.Cluster, c.runningWorkload); err != nil { + return false, nil, err + } else if !isLatestRevision { + return false, nil, nil + } + + var messages appsv1alpha1.ComponentMessageMap + // check pod readiness + hasFailedPod, msg, _ := hasFailedAndTimedOutPod(pods) + if hasFailedPod { + messages = msg + return true, messages, nil + } + // check role probe + if c.component.WorkloadType != appsv1alpha1.Consensus && c.component.WorkloadType != appsv1alpha1.Replication { + return false, messages, nil + } + hasProbeTimeout := false + for _, pod := range pods { + if _, ok := 
pod.Labels[constant.RoleLabelKey]; ok { + continue + } + for _, condition := range pod.Status.Conditions { + if condition.Type != corev1.PodReady || condition.Status != corev1.ConditionTrue { + continue + } + podsReadyTime := &condition.LastTransitionTime + if isProbeTimeout(c.component.Probes, podsReadyTime) { + hasProbeTimeout = true + if messages == nil { + messages = appsv1alpha1.ComponentMessageMap{} + } + messages.SetObjectMessage(pod.Kind, pod.Name, "Role probe timeout, check whether the application is available") + } + } + } + return hasProbeTimeout, messages, nil +} + +func (c *rsmComponent) isAllConfigSynced(reqCtx intctrlutil.RequestCtx, cli client.Client) bool { + checkFinishedReconfigure := func(cm *corev1.ConfigMap) bool { + labels := cm.GetLabels() + annotations := cm.GetAnnotations() + if len(annotations) == 0 || len(labels) == 0 { + return false + } + hash, _ := util.ComputeHash(cm.Data) + return labels[constant.CMInsConfigurationHashLabelKey] == hash + } + + var ( + cmKey client.ObjectKey + cmObj = &corev1.ConfigMap{} + allConfigSynced = true + ) + for _, configSpec := range c.component.ConfigTemplates { + cmKey = client.ObjectKey{ + Namespace: c.GetNamespace(), + Name: cfgcore.GetComponentCfgName(c.GetClusterName(), c.GetName(), configSpec.Name), + } + if err := cli.Get(reqCtx.Ctx, cmKey, cmObj); err != nil { + return true + } + if !checkFinishedReconfigure(cmObj) { + allConfigSynced = false + break + } + } + return allConfigSynced +} + +func (c *rsmComponent) updateMembersStatus() { + // get component status + componentStatus := c.getComponentStatus() + + // for compatibilities prior KB 0.7.0 + buildConsensusSetStatus := func(membersStatus []workloads.MemberStatus) *appsv1alpha1.ConsensusSetStatus { + consensusSetStatus := &appsv1alpha1.ConsensusSetStatus{ + Leader: appsv1alpha1.ConsensusMemberStatus{ + Name: "", + Pod: constant.ComponentStatusDefaultPodName, + AccessMode: appsv1alpha1.None, + }, + } + for _, memberStatus := range membersStatus 
{ + status := appsv1alpha1.ConsensusMemberStatus{ + Name: memberStatus.Name, + Pod: memberStatus.PodName, + AccessMode: appsv1alpha1.AccessMode(memberStatus.AccessMode), + } + switch { + case memberStatus.IsLeader: + consensusSetStatus.Leader = status + case memberStatus.CanVote: + consensusSetStatus.Followers = append(consensusSetStatus.Followers, status) + default: + consensusSetStatus.Learner = &status + } + } + return consensusSetStatus + } + buildReplicationSetStatus := func(membersStatus []workloads.MemberStatus) *appsv1alpha1.ReplicationSetStatus { + replicationSetStatus := &appsv1alpha1.ReplicationSetStatus{ + Primary: appsv1alpha1.ReplicationMemberStatus{ + Pod: "Unknown", + }, + } + for _, memberStatus := range membersStatus { + status := appsv1alpha1.ReplicationMemberStatus{ + Pod: memberStatus.PodName, + } + switch { + case memberStatus.IsLeader: + replicationSetStatus.Primary = status + default: + replicationSetStatus.Secondaries = append(replicationSetStatus.Secondaries, status) + } + } + return replicationSetStatus + } + + // update members status + switch c.component.WorkloadType { + case appsv1alpha1.Consensus: + componentStatus.ConsensusSetStatus = buildConsensusSetStatus(c.runningWorkload.Status.MembersStatus) + case appsv1alpha1.Replication: + componentStatus.ReplicationSetStatus = buildReplicationSetStatus(c.runningWorkload.Status.MembersStatus) + } + componentStatus.MembersStatus = slices.Clone(c.runningWorkload.Status.MembersStatus) + + // set component status back + c.Cluster.Status.Components[c.GetName()] = componentStatus +} + +func (c *rsmComponent) getComponentStatus() appsv1alpha1.ClusterComponentStatus { + if c.Cluster.Status.Components == nil { + c.Cluster.Status.Components = make(map[string]appsv1alpha1.ClusterComponentStatus) + } + if _, ok := c.Cluster.Status.Components[c.GetName()]; !ok { + c.Cluster.Status.Components[c.GetName()] = appsv1alpha1.ClusterComponentStatus{} + } + return c.Cluster.Status.Components[c.GetName()] +} + 
+func (c *rsmComponent) isScaleOutFailed(reqCtx intctrlutil.RequestCtx, cli client.Client) (bool, error) { + if c.runningWorkload.Spec.Replicas == nil { + return false, nil + } + if c.component.Replicas <= *c.runningWorkload.Spec.Replicas { + return false, nil + } + if c.workloadVertex == nil { + return false, nil + } + stsObj := ConvertRSMToSTS(c.runningWorkload) + rsmProto := c.workloadVertex.Obj.(*workloads.ReplicatedStateMachine) + stsProto := ConvertRSMToSTS(rsmProto) + backupKey := types.NamespacedName{ + Namespace: stsObj.Namespace, + Name: stsObj.Name + "-scaling", + } + d, err := newDataClone(reqCtx, cli, c.Cluster, c.component, stsObj, stsProto, backupKey) + if err != nil { + return false, err + } + if status, err := d.checkBackupStatus(); err != nil { + return false, err + } else if status == backupStatusFailed { + return true, nil + } + for _, name := range d.pvcKeysToRestore() { + if status, err := d.checkRestoreStatus(name); err != nil { + return false, err + } else if status == backupStatusFailed { + return true, nil + } + } + return false, nil +} + +func (c *rsmComponent) restart(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + return restartPod(&c.runningWorkload.Spec.Template) +} + +func (c *rsmComponent) expandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { + var proto *corev1.PersistentVolumeClaimTemplate + for _, v := range c.component.VolumeClaimTemplates { + if v.Name == vct.Name { + proto = &v + break + } + } + // REVIEW: seems we can remove a volume claim from templates at runtime, without any changes and warning messages? 
+ if proto == nil { + continue + } + + if err := c.expandVolumes(reqCtx, cli, vct.Name, proto); err != nil { + return err + } + } + return nil +} + +func (c *rsmComponent) expandVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, + vctName string, proto *corev1.PersistentVolumeClaimTemplate) error { + pvcNotFound := false + for i := *c.runningWorkload.Spec.Replicas - 1; i >= 0; i-- { + pvc := &corev1.PersistentVolumeClaim{} + pvcKey := types.NamespacedName{ + Namespace: c.GetNamespace(), + Name: fmt.Sprintf("%s-%s-%d", vctName, c.runningWorkload.Name, i), + } + if err := cli.Get(reqCtx.Ctx, pvcKey, pvc); err != nil { + if apierrors.IsNotFound(err) { + pvcNotFound = true + } else { + return err + } + } + if err := c.updatePVCSize(reqCtx, cli, pvcKey, pvc, pvcNotFound, proto); err != nil { + return err + } + } + return nil +} + +func (c *rsmComponent) updatePVCSize(reqCtx intctrlutil.RequestCtx, cli client.Client, pvcKey types.NamespacedName, + pvc *corev1.PersistentVolumeClaim, pvcNotFound bool, vctProto *corev1.PersistentVolumeClaimTemplate) error { + // reference: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#recovering-from-failure-when-expanding-volumes + // 1. Mark the PersistentVolume(PV) that is bound to the PersistentVolumeClaim(PVC) with Retain reclaim policy. + // 2. Delete the PVC. Since PV has Retain reclaim policy - we will not lose any data when we recreate the PVC. + // 3. Delete the claimRef entry from PV specs, so as new PVC can bind to it. This should make the PV Available. + // 4. Re-create the PVC with smaller size than PV and set volumeName field of the PVC to the name of the PV. This should bind new PVC to existing PV. + // 5. Don't forget to restore the reclaim policy of the PV. 
+ newPVC := pvc.DeepCopy() + if pvcNotFound { + newPVC.Name = pvcKey.Name + newPVC.Namespace = pvcKey.Namespace + newPVC.SetLabels(vctProto.Labels) + newPVC.Spec = vctProto.Spec + ml := client.MatchingLabels{ + constant.PVCNameLabelKey: pvcKey.Name, + } + pvList := corev1.PersistentVolumeList{} + if err := cli.List(reqCtx.Ctx, &pvList, ml); err != nil { + return err + } + for _, pv := range pvList.Items { + // find pv referenced this pvc + if pv.Spec.ClaimRef == nil { + continue + } + if pv.Spec.ClaimRef.Name == pvcKey.Name { + newPVC.Spec.VolumeName = pv.Name + break + } } } else { - compDef = definition.GetComponentDefByName(compName) - if version != nil { - compVer = version.Spec.GetDefNameMappingComponents()[compName] + newPVC.Spec.Resources.Requests[corev1.ResourceStorage] = vctProto.Spec.Resources.Requests[corev1.ResourceStorage] + // delete annotation to make it re-bind + delete(newPVC.Annotations, "pv.kubernetes.io/bind-completed") + } + + pvNotFound := false + + // step 1: update pv to retain + pv := &corev1.PersistentVolume{} + pvKey := types.NamespacedName{ + Namespace: pvcKey.Namespace, + Name: newPVC.Spec.VolumeName, + } + if err := cli.Get(reqCtx.Ctx, pvKey, pv); err != nil { + if apierrors.IsNotFound(err) { + pvNotFound = true + } else { + return err } } - if compDef == nil { - return nil, nil + type pvcRecreateStep int + const ( + pvPolicyRetainStep pvcRecreateStep = iota + deletePVCStep + removePVClaimRefStep + createPVCStep + pvRestorePolicyStep + ) + + addStepMap := map[pvcRecreateStep]func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex{ + pvPolicyRetainStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { + // step 1: update pv to retain + retainPV := pv.DeepCopy() + if retainPV.Labels == nil { + retainPV.Labels = make(map[string]string) + } + // add label to pv, in case pvc get deleted, and we can't find pv + retainPV.Labels[constant.PVCNameLabelKey] = 
pvcKey.Name + if retainPV.Annotations == nil { + retainPV.Annotations = make(map[string]string) + } + retainPV.Annotations[constant.PVLastClaimPolicyAnnotationKey] = string(pv.Spec.PersistentVolumeReclaimPolicy) + retainPV.Spec.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimRetain + return c.patchResource(retainPV, pv, fromVertex) + }, + deletePVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { + // step 2: delete pvc, this will not delete pv because policy is 'retain' + removeFinalizerPVC := pvc.DeepCopy() + removeFinalizerPVC.SetFinalizers([]string{}) + removeFinalizerPVCVertex := c.patchResource(removeFinalizerPVC, pvc, fromVertex) + return c.deleteResource(pvc, removeFinalizerPVCVertex) + }, + removePVClaimRefStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { + // step 3: remove claimRef in pv + removeClaimRefPV := pv.DeepCopy() + if removeClaimRefPV.Spec.ClaimRef != nil { + removeClaimRefPV.Spec.ClaimRef.UID = "" + removeClaimRefPV.Spec.ClaimRef.ResourceVersion = "" + } + return c.patchResource(removeClaimRefPV, pv, fromVertex) + }, + createPVCStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { + // step 4: create new pvc + newPVC.SetResourceVersion("") + return c.createResource(newPVC, fromVertex) + }, + pvRestorePolicyStep: func(fromVertex *ictrltypes.LifecycleVertex, step pvcRecreateStep) *ictrltypes.LifecycleVertex { + // step 5: restore to previous pv policy + restorePV := pv.DeepCopy() + policy := corev1.PersistentVolumeReclaimPolicy(restorePV.Annotations[constant.PVLastClaimPolicyAnnotationKey]) + if len(policy) == 0 { + policy = corev1.PersistentVolumeReclaimDelete + } + restorePV.Spec.PersistentVolumeReclaimPolicy = policy + return c.patchResource(restorePV, pv, fromVertex) + }, + } + + updatePVCByRecreateFromStep := func(fromStep pvcRecreateStep) { + lastVertex := c.workloadVertex 
+ for step := pvRestorePolicyStep; step >= fromStep && step >= pvPolicyRetainStep; step-- { + lastVertex = addStepMap[step](lastVertex, step) + } + } + + targetQuantity := vctProto.Spec.Resources.Requests[corev1.ResourceStorage] + if pvcNotFound && !pvNotFound { + // this could happen if create pvc step failed when recreating pvc + updatePVCByRecreateFromStep(removePVClaimRefStep) + return nil + } + if pvcNotFound && pvNotFound { + // if both pvc and pv not found, do nothing + return nil + } + if reflect.DeepEqual(pvc.Spec.Resources, newPVC.Spec.Resources) && pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain { + // this could happen if create pvc succeeded but last step failed + updatePVCByRecreateFromStep(pvRestorePolicyStep) + return nil + } + if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; !viper.GetBool(constant.CfgRecoverVolumeExpansionFailure) && + pvcQuantity.Cmp(targetQuantity) == 1 && // check if it's compressing volume + targetQuantity.Cmp(*pvc.Status.Capacity.Storage()) >= 0 { // check if target size is greater than or equal to actual size + // this branch means we can update pvc size by recreate it + updatePVCByRecreateFromStep(pvPolicyRetainStep) + return nil + } + if pvcQuantity := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; pvcQuantity.Cmp(vctProto.Spec.Resources.Requests[corev1.ResourceStorage]) != 0 { + // use pvc's update without anything extra + c.updateResource(newPVC, c.workloadVertex) + return nil + } + // all the else means no need to update + + return nil +} + +func (c *rsmComponent) hasVolumeExpansionRunning(reqCtx intctrlutil.RequestCtx, cli client.Client) (bool, bool, error) { + var ( + running bool + failed bool + ) + for _, vct := range c.runningWorkload.Spec.VolumeClaimTemplates { + volumes, err := c.getRunningVolumes(reqCtx, cli, vct.Name, c.runningWorkload) + if err != nil { + return false, false, err + } + for _, v := range volumes { + if v.Status.Capacity == nil || 
v.Status.Capacity.Storage().Cmp(v.Spec.Resources.Requests[corev1.ResourceStorage]) >= 0 { + continue + } + running = true + // TODO: how to check the expansion failed? + } } + return running, failed, nil +} - clsMgr, err := getClassManager(reqCtx.Ctx, cli, cluster) +func (c *rsmComponent) horizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + sts := ConvertRSMToSTS(c.runningWorkload) + if sts.Status.ReadyReplicas == c.component.Replicas { + return nil + } + ret := c.horizontalScaling(sts) + if ret == 0 { + if err := c.postScaleIn(reqCtx, cli); err != nil { + return err + } + if err := c.postScaleOut(reqCtx, cli, sts); err != nil { + return err + } + return nil + } + if ret < 0 { + if err := c.scaleIn(reqCtx, cli, sts); err != nil { + return err + } + } else { + if err := c.scaleOut(reqCtx, cli, sts); err != nil { + return err + } + } + + if err := c.updatePodReplicaLabel4Scaling(reqCtx, cli, c.component.Replicas); err != nil { + return err + } + + // update KB___ env needed by pod to obtain hostname. + c.updatePodEnvConfig() + + reqCtx.Recorder.Eventf(c.Cluster, + corev1.EventTypeNormal, + "HorizontalScale", + "start horizontal scale component %s of cluster %s from %d to %d", + c.GetName(), c.GetClusterName(), int(c.component.Replicas)-ret, c.component.Replicas) + + return nil +} + +// < 0 for scale in, > 0 for scale out, and == 0 for nothing +func (c *rsmComponent) horizontalScaling(stsObj *appsv1.StatefulSet) int { + return int(c.component.Replicas - *stsObj.Spec.Replicas) +} + +func (c *rsmComponent) updatePodEnvConfig() { + for _, v := range ictrltypes.FindAll[*corev1.ConfigMap](c.dag) { + node := v.(*ictrltypes.LifecycleVertex) + // TODO: need a way to reference the env config. 
+ envConfigName := fmt.Sprintf("%s-%s-env", c.GetClusterName(), c.GetName()) + if node.Obj.GetName() == envConfigName { + node.Action = ictrltypes.ActionUpdatePtr() + } + } +} + +func (c *rsmComponent) updatePodReplicaLabel4Scaling(reqCtx intctrlutil.RequestCtx, cli client.Client, replicas int32) error { + pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.getMatchingLabels()) if err != nil { - return nil, err + return err + } + for _, pod := range pods { + obj := pod.DeepCopy() + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[constant.ComponentReplicasAnnotationKey] = strconv.Itoa(int(replicas)) + c.updateResource(obj, c.workloadVertex) + } + return nil +} + +func (c *rsmComponent) scaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { + // if scale in to 0, do not delete pvcs + if c.component.Replicas == 0 { + reqCtx.Log.Info("scale in to 0, keep all PVCs") + return nil } - serviceReferences, err := plan.GenServiceReferences(reqCtx, cli, cluster, compDef, compSpec) + // TODO: check the component definition to determine whether we need to call leave member before deleting replicas. 
+ err := c.leaveMember4ScaleIn(reqCtx, cli, stsObj) if err != nil { - return nil, err + reqCtx.Log.Info(fmt.Sprintf("leave member at scaling-in error, retry later: %s", err.Error())) + return err + } + return c.deletePVCs4ScaleIn(reqCtx, cli, stsObj) +} + +func (c *rsmComponent) postScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + return nil +} + +func (c *rsmComponent) leaveMember4ScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { + pods, err := listPodOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.getMatchingLabels()) + if err != nil { + return err + } + for _, pod := range pods { + subs := strings.Split(pod.Name, "-") + if ordinal, err := strconv.ParseInt(subs[len(subs)-1], 10, 32); err != nil { + return err + } else if int32(ordinal) < c.component.Replicas { + continue + } + lorryCli, err1 := lorry.NewClient(c.component.CharacterType, *pod) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + + if lorryCli == nil { + // no lorry in the pod + continue + } + + if err2 := lorryCli.LeaveMember(reqCtx.Ctx); err2 != nil { + if err == nil { + err = err2 + } + } + } + return err // TODO: use requeue-after +} + +func (c *rsmComponent) deletePVCs4ScaleIn(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { + for i := c.component.Replicas; i < *stsObj.Spec.Replicas; i++ { + for _, vct := range stsObj.Spec.VolumeClaimTemplates { + pvcKey := types.NamespacedName{ + Namespace: stsObj.Namespace, + Name: fmt.Sprintf("%s-%s-%d", vct.Name, stsObj.Name, i), + } + pvc := corev1.PersistentVolumeClaim{} + if err := cli.Get(reqCtx.Ctx, pvcKey, &pvc); err != nil { + return err + } + // Since there are no order guarantee between updating STS and deleting PVCs, if there is any error occurred + // after updating STS and before deleting PVCs, the PVCs intended to scale-in will be leaked. 
+ // For simplicity, the updating dependency is added between them to guarantee that the PVCs to scale-in + // will be deleted or the scaling-in operation will be failed. + c.deleteResource(&pvc, c.workloadVertex) + } + } + return nil +} + +func (c *rsmComponent) scaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { + var ( + backupKey = types.NamespacedName{ + Namespace: stsObj.Namespace, + Name: stsObj.Name + "-scaling", + } + ) + + // sts's replicas=0 means it's starting not scaling, skip all the scaling work. + if *stsObj.Spec.Replicas == 0 { + return nil + } + + c.workloadVertex.Immutable = true + rsmProto := c.workloadVertex.Obj.(*workloads.ReplicatedStateMachine) + stsProto := ConvertRSMToSTS(rsmProto) + d, err := newDataClone(reqCtx, cli, c.Cluster, c.component, stsObj, stsProto, backupKey) + if err != nil { + return err + } + var succeed bool + if d == nil { + succeed = true + } else { + succeed, err = d.succeed() + if err != nil { + return err + } + } + if succeed { + // pvcs are ready, rsm.replicas should be updated + c.workloadVertex.Immutable = false + return c.postScaleOut(reqCtx, cli, stsObj) + } else { + c.workloadVertex.Immutable = true + // update objs will trigger cluster reconcile, no need to requeue error + objs, err := d.cloneData(d) + if err != nil { + return err + } + for _, obj := range objs { + c.createResource(obj, nil) + } + return nil + } +} + +func (c *rsmComponent) postScaleOut(reqCtx intctrlutil.RequestCtx, cli client.Client, stsObj *appsv1.StatefulSet) error { + var ( + snapshotKey = types.NamespacedName{ + Namespace: stsObj.Namespace, + Name: stsObj.Name + "-scaling", + } + ) + + d, err := newDataClone(reqCtx, cli, c.Cluster, c.component, stsObj, stsObj, snapshotKey) + if err != nil { + return err + } + if d != nil { + // clean backup resources. + // there will not be any backup resources other than scale out. 
+ tmpObjs, err := d.clearTmpResources() + if err != nil { + return err + } + for _, obj := range tmpObjs { + c.deleteResource(obj, nil) + } + } + + return nil +} + +func (c *rsmComponent) updateUnderlyingResources(reqCtx intctrlutil.RequestCtx, cli client.Client, rsmObj *workloads.ReplicatedStateMachine) error { + if rsmObj == nil { + c.createWorkload() + } else { + c.updateWorkload(rsmObj) + // to work around that the scaled PVC will be deleted at object action. + if err := c.updateVolumes(reqCtx, cli, rsmObj); err != nil { + return err + } + } + if err := c.updatePDB(reqCtx, cli); err != nil { + return err + } + return nil +} + +func (c *rsmComponent) createWorkload() { + rsmProto := c.workloadVertex.Obj.(*workloads.ReplicatedStateMachine) + buildWorkLoadAnnotations(rsmProto, c.Cluster) + c.workloadVertex.Obj = rsmProto + c.workloadVertex.Action = ictrltypes.ActionCreatePtr() +} + +func (c *rsmComponent) updateWorkload(rsmObj *workloads.ReplicatedStateMachine) bool { + rsmObjCopy := rsmObj.DeepCopy() + rsmProto := c.workloadVertex.Obj.(*workloads.ReplicatedStateMachine) + + // remove original monitor annotations + if len(rsmObjCopy.Annotations) > 0 { + maps.DeleteFunc(rsmObjCopy.Annotations, func(k, v string) bool { + return strings.HasPrefix(k, "monitor.kubeblocks.io") + }) + } + mergeAnnotations(rsmObjCopy.Annotations, &rsmProto.Annotations) + rsmObjCopy.Annotations = rsmProto.Annotations + buildWorkLoadAnnotations(rsmObjCopy, c.Cluster) + + // keep the original template annotations. + // if annotations exist and are replaced, the rsm will be updated. 
+ mergeAnnotations(rsmObjCopy.Spec.Template.Annotations, &rsmProto.Spec.Template.Annotations) + rsmObjCopy.Spec.Template = rsmProto.Spec.Template + rsmObjCopy.Spec.Replicas = rsmProto.Spec.Replicas + c.updateUpdateStrategy(rsmObjCopy, rsmProto) + rsmObjCopy.Spec.Service = rsmProto.Spec.Service + rsmObjCopy.Spec.AlternativeServices = rsmProto.Spec.AlternativeServices + rsmObjCopy.Spec.Roles = rsmProto.Spec.Roles + rsmObjCopy.Spec.RoleProbe = rsmProto.Spec.RoleProbe + rsmObjCopy.Spec.MembershipReconfiguration = rsmProto.Spec.MembershipReconfiguration + rsmObjCopy.Spec.MemberUpdateStrategy = rsmProto.Spec.MemberUpdateStrategy + rsmObjCopy.Spec.Credential = rsmProto.Spec.Credential + + resolvePodSpecDefaultFields(rsmObj.Spec.Template.Spec, &rsmObjCopy.Spec.Template.Spec) + + delayUpdatePodSpecSystemFields(rsmObj.Spec.Template.Spec, &rsmObjCopy.Spec.Template.Spec) + isTemplateUpdated := !reflect.DeepEqual(&rsmObj.Spec, &rsmObjCopy.Spec) + if isTemplateUpdated { + updatePodSpecSystemFields(&rsmObjCopy.Spec.Template.Spec) + } + if isTemplateUpdated || !reflect.DeepEqual(rsmObj.Annotations, rsmObjCopy.Annotations) { + c.workloadVertex.Obj = rsmObjCopy + c.workloadVertex.Action = ictrltypes.ActionPtr(ictrltypes.UPDATE) + return true + } + return false +} + +func (c *rsmComponent) updatePDB(reqCtx intctrlutil.RequestCtx, cli client.Client) error { + pdbObjList, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PodDisruptionBudgetSignature, c.GetNamespace(), c.getMatchingLabels()) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + for _, v := range ictrltypes.FindAll[*policyv1.PodDisruptionBudget](c.dag) { + node := v.(*ictrltypes.LifecycleVertex) + pdbProto := node.Obj.(*policyv1.PodDisruptionBudget) + + if pos := slices.IndexFunc(pdbObjList, func(pdbObj *policyv1.PodDisruptionBudget) bool { + return pdbObj.GetName() == pdbProto.GetName() + }); pos < 0 { + node.Action = ictrltypes.ActionCreatePtr() // TODO: Create or Noop? 
+ } else { + pdbObj := pdbObjList[pos] + if !reflect.DeepEqual(pdbObj.Spec, pdbProto.Spec) { + pdbObj.Spec = pdbProto.Spec + node.Obj = pdbObj + node.Action = ictrltypes.ActionUpdatePtr() + } + } + } + return nil +} + +func (c *rsmComponent) updateUpdateStrategy(rsmObj, rsmProto *workloads.ReplicatedStateMachine) { + var objMaxUnavailable *intstr.IntOrString + if rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { + objMaxUnavailable = rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable + } + rsmObj.Spec.UpdateStrategy = rsmProto.Spec.UpdateStrategy + if objMaxUnavailable == nil && rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { + // HACK: This field is alpha-level (since v1.24) and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. + // When we get a nil MaxUnavailable from k8s, we consider that the field is not supported by the server, + // and set the MaxUnavailable as nil explicitly to avoid the workload been updated unexpectedly. + // Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods + rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = nil + } +} + +func (c *rsmComponent) updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, rsmObj *workloads.ReplicatedStateMachine) error { + // PVCs which have been added to the dag because of volume expansion. 
+ pvcNameSet := sets.New[string]() + for _, v := range ictrltypes.FindAll[*corev1.PersistentVolumeClaim](c.dag) { + pvcNameSet.Insert(v.(*ictrltypes.LifecycleVertex).Obj.GetName()) } - synthesizedComp, err := component.BuildComponent(reqCtx, clsMgr, cluster, definition, compDef, compSpec, serviceReferences, compVer) + for _, vct := range c.component.VolumeClaimTemplates { + pvcs, err := c.getRunningVolumes(reqCtx, cli, vct.Name, rsmObj) + if err != nil { + return err + } + for _, pvc := range pvcs { + if pvcNameSet.Has(pvc.Name) { + continue + } + c.noopResource(pvc, c.workloadVertex) + } + } + return nil +} + +func (c *rsmComponent) getRunningVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, vctName string, + rsmObj *workloads.ReplicatedStateMachine) ([]*corev1.PersistentVolumeClaim, error) { + pvcs, err := listObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PersistentVolumeClaimSignature, c.GetNamespace(), c.getMatchingLabels()) if err != nil { + if apierrors.IsNotFound(err) { + return nil, nil + } return nil, err } - if synthesizedComp == nil { - return nil, nil + matchedPVCs := make([]*corev1.PersistentVolumeClaim, 0) + prefix := fmt.Sprintf("%s-%s", vctName, rsmObj.Name) + for _, pvc := range pvcs { + if strings.HasPrefix(pvc.Name, prefix) { + matchedPVCs = append(matchedPVCs, pvc) + } } + return matchedPVCs, nil +} - if intctrlutil.IsRSMEnabled() { - return newRSMComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil +// hasFailedAndTimedOutPod returns whether the pods of components are still failed after a PodFailedTimeout period. 
+func hasFailedAndTimedOutPod(pods []*corev1.Pod) (bool, appsv1alpha1.ComponentMessageMap, time.Duration) { + var ( + hasTimedOutPod bool + messages = appsv1alpha1.ComponentMessageMap{} + hasFailedPod bool + requeueAfter time.Duration + ) + for _, pod := range pods { + isFailed, isTimedOut, messageStr := isPodFailedAndTimedOut(pod) + if !isFailed { + continue + } + if isTimedOut { + hasTimedOutPod = true + messages.SetObjectMessage(pod.Kind, pod.Name, messageStr) + } else { + hasFailedPod = true + } + } + if hasFailedPod && !hasTimedOutPod { + requeueAfter = podContainerFailedTimeout } + return hasTimedOutPod, messages, requeueAfter +} - switch compDef.WorkloadType { - case appsv1alpha1.Replication: - return newReplicationComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil - case appsv1alpha1.Consensus: - return newConsensusComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil - case appsv1alpha1.Stateful: - return newStatefulComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil - case appsv1alpha1.Stateless: - return newStatelessComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil +// isPodScheduledFailedAndTimedOut checks whether the unscheduled pod has timed out. +func isPodScheduledFailedAndTimedOut(pod *corev1.Pod) (bool, bool, string) { + for _, cond := range pod.Status.Conditions { + if cond.Type != corev1.PodScheduled { + continue + } + if cond.Status == corev1.ConditionTrue { + return false, false, "" + } + return true, time.Now().After(cond.LastTransitionTime.Add(podScheduledFailedTimeout)), cond.Message + } + return false, false, "" +} + +// isPodFailedAndTimedOut checks if the pod is failed and timed out. 
+func isPodFailedAndTimedOut(pod *corev1.Pod) (bool, bool, string) { + if isFailed, isTimedOut, message := isPodScheduledFailedAndTimedOut(pod); isFailed { + return isFailed, isTimedOut, message + } + initContainerFailed, message := isAnyContainerFailed(pod.Status.InitContainerStatuses) + if initContainerFailed { + return initContainerFailed, isContainerFailedAndTimedOut(pod, corev1.PodInitialized), message + } + containerFailed, message := isAnyContainerFailed(pod.Status.ContainerStatuses) + if containerFailed { + return containerFailed, isContainerFailedAndTimedOut(pod, corev1.ContainersReady), message + } + return false, false, "" +} + +// isAnyContainerFailed checks whether any container in the list is failed. +func isAnyContainerFailed(containersStatus []corev1.ContainerStatus) (bool, string) { + for _, v := range containersStatus { + waitingState := v.State.Waiting + if waitingState != nil && waitingState.Message != "" { + return true, waitingState.Message + } + terminatedState := v.State.Terminated + if terminatedState != nil && terminatedState.Message != "" { + return true, terminatedState.Message + } } - panic(fmt.Sprintf("unknown workload type: %s, cluster: %s, component: %s, component definition ref: %s", - compDef.WorkloadType, cluster.Name, compSpec.Name, compSpec.ComponentDefRef)) + return false, "" } -func getClassManager(ctx context.Context, cli types2.ReadonlyClient, cluster *appsv1alpha1.Cluster) (*class.Manager, error) { - var classDefinitionList appsv1alpha1.ComponentClassDefinitionList - ml := []client.ListOption{ - client.MatchingLabels{constant.ClusterDefLabelKey: cluster.Spec.ClusterDefRef}, +// isContainerFailedAndTimedOut checks whether the failed container has timed out. 
+func isContainerFailedAndTimedOut(pod *corev1.Pod, podConditionType corev1.PodConditionType) bool { + containerReadyCondition := intctrlutil.GetPodCondition(&pod.Status, podConditionType) + if containerReadyCondition == nil || containerReadyCondition.LastTransitionTime.IsZero() { + return false } - if err := cli.List(ctx, &classDefinitionList, ml...); err != nil { + return time.Now().After(containerReadyCondition.LastTransitionTime.Add(podContainerFailedTimeout)) +} + +type gvkName struct { + gvk schema.GroupVersionKind + ns, name string +} + +type clusterSnapshot map[gvkName]client.Object + +func getGVKName(object client.Object, scheme *runtime.Scheme) (*gvkName, error) { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { return nil, err } + return &gvkName{ + gvk: gvk, + ns: object.GetNamespace(), + name: object.GetName(), + }, nil +} - var constraintList appsv1alpha1.ComponentResourceConstraintList - if err := cli.List(ctx, &constraintList); err != nil { +func isOwnerOf(owner, obj client.Object, scheme *runtime.Scheme) bool { + ro, ok := owner.(runtime.Object) + if !ok { + return false + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return false + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + owners := obj.GetOwnerReferences() + referSameObject := func(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name + } + for _, ownerRef := range owners { + if referSameObject(ownerRef, ref) { + return true + } + } + return false +} + +func ownedKinds() []client.ObjectList { + return []client.ObjectList{ + &appsv1.StatefulSetList{}, + &appsv1.DeploymentList{}, + &corev1.ServiceList{}, + 
&corev1.SecretList{}, + &corev1.ConfigMapList{}, + &corev1.PersistentVolumeClaimList{}, // TODO(merge): remove it? + &policyv1.PodDisruptionBudgetList{}, + &dataprotectionv1alpha1.BackupPolicyList{}, + } +} + +// read all objects owned by component +func readCacheSnapshot(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster) (clusterSnapshot, error) { + // list what kinds of object cluster owns + kinds := ownedKinds() + snapshot := make(clusterSnapshot) + ml := client.MatchingLabels{constant.AppInstanceLabelKey: cluster.GetName()} + inNS := client.InNamespace(cluster.Namespace) + for _, list := range kinds { + if err := cli.List(reqCtx.Ctx, list, inNS, ml); err != nil { + return nil, err + } + // reflect get list.Items + items := reflect.ValueOf(list).Elem().FieldByName("Items") + l := items.Len() + for i := 0; i < l; i++ { + // get the underlying object + object := items.Index(i).Addr().Interface().(client.Object) + // put to snapshot if owned by our cluster + if isOwnerOf(cluster, object, cli.Scheme()) { + name, err := getGVKName(object, cli.Scheme()) + if err != nil { + return nil, err + } + snapshot[*name] = object + } + } + } + return snapshot, nil +} + +func resolveObjectAction(snapshot clusterSnapshot, vertex *ictrltypes.LifecycleVertex, scheme *runtime.Scheme) (*ictrltypes.LifecycleAction, error) { + gvk, err := getGVKName(vertex.Obj, scheme) + if err != nil { return nil, err } - return class.NewManager(classDefinitionList, constraintList) + if _, ok := snapshot[*gvk]; ok { + return ictrltypes.ActionNoopPtr(), nil + } else { + return ictrltypes.ActionCreatePtr(), nil + } } diff --git a/controllers/apps/components/component_set.go b/controllers/apps/components/component_set.go deleted file mode 100644 index bc04c07b40b..00000000000 --- a/controllers/apps/components/component_set.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free 
software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" -) - -// TODO(impl): replace it with ComponentWorkload and <*>Set implementation. - -type componentSet interface { - // IsRunning when relevant k8s workloads changes, it checks whether the component is running. - // you can also reconcile the pods of component till the component is Running here. - IsRunning(ctx context.Context, obj client.Object) (bool, error) - - // PodsReady checks whether all pods of the component are ready. - // it means the pods are available in StatefulSet or Deployment. - PodsReady(ctx context.Context, obj client.Object) (bool, error) - - // PodIsAvailable checks whether a pod of the component is available. - // if the component is Stateless/StatefulSet, the available conditions follows as: - // 1. the pod is ready. - // 2. readyTime reached minReadySeconds. - // if the component is consensusSet,it will be available when the pod is ready and labeled with its role. 
- PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool - - // GetPhaseWhenPodsReadyAndProbeTimeout when the pods of component are ready but the probe timed-out, - // calculate the component phase is Failed or Abnormal. - GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (v1alpha1.ClusterComponentPhase, v1alpha1.ComponentMessageMap) - - // GetPhaseWhenPodsNotReady when the pods of component are not ready, calculate the component phase is Failed or Abnormal. - // if return an empty phase, means the pods of component are ready and skips it. - GetPhaseWhenPodsNotReady(ctx context.Context, componentName string, originPhaseIsUpRunning bool) (v1alpha1.ClusterComponentPhase, v1alpha1.ComponentMessageMap, error) - - HandleRestart(ctx context.Context, obj client.Object) ([]graph.Vertex, error) - - HandleRoleChange(ctx context.Context, obj client.Object) ([]graph.Vertex, error) -} - -// componentSetBase is a common component set base struct. -type componentSetBase struct { - Cli client.Client - Cluster *v1alpha1.Cluster - SynthesizedComponent *component.SynthesizedComponent - ComponentSpec *v1alpha1.ClusterComponentSpec // for test cases used only - ComponentDef *v1alpha1.ClusterComponentDefinition // for test cases used only -} diff --git a/controllers/apps/components/workload_builder.go b/controllers/apps/components/component_workload_builder.go similarity index 64% rename from controllers/apps/components/workload_builder.go rename to controllers/apps/components/component_workload_builder.go index 5e680085d7e..6fe93205095 100644 --- a/controllers/apps/components/workload_builder.go +++ b/controllers/apps/components/component_workload_builder.go @@ -35,8 +35,6 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) -// TODO(impl): define a custom workload to encapsulate all the resources. 
- type componentWorkloadBuilder interface { // runtime, config, script, env, volume, service, monitor, probe BuildEnv() componentWorkloadBuilder @@ -44,87 +42,83 @@ type componentWorkloadBuilder interface { BuildWorkload() componentWorkloadBuilder BuildPDB() componentWorkloadBuilder BuildVolumeMount() componentWorkloadBuilder - BuildService() componentWorkloadBuilder - BuildHeadlessService() componentWorkloadBuilder BuildTLSCert() componentWorkloadBuilder BuildTLSVolume() componentWorkloadBuilder Complete() error } -type componentWorkloadBuilderBase struct { - ReqCtx intctrlutil.RequestCtx - Client client.Client - Comp Component - DefaultAction *ictrltypes.LifecycleAction - ConcreteBuilder componentWorkloadBuilder - Error error - EnvConfig *corev1.ConfigMap - Workload client.Object - LocalObjs []client.Object // cache the objects needed for configuration, should remove this after refactoring the configuration +type rsmComponentWorkloadBuilder struct { + reqCtx intctrlutil.RequestCtx + client client.Client + comp *rsmComponent + defaultAction *ictrltypes.LifecycleAction + error error + envConfig *corev1.ConfigMap + workload client.Object + localObjs []client.Object // cache the objects needed for configuration, should remove this after refactoring the configuration } -func (b *componentWorkloadBuilderBase) BuildEnv() componentWorkloadBuilder { +var _ componentWorkloadBuilder = &rsmComponentWorkloadBuilder{} + +func (b *rsmComponentWorkloadBuilder) BuildEnv() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - envCfg := factory.BuildEnvConfig(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - b.EnvConfig = envCfg - b.LocalObjs = append(b.LocalObjs, envCfg) + envCfg := factory.BuildEnvConfig(b.comp.GetCluster(), b.comp.GetSynthesizedComponent()) + b.envConfig = envCfg + b.localObjs = append(b.localObjs, envCfg) return []client.Object{envCfg}, nil } return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildConfig() 
componentWorkloadBuilder { +func (b *rsmComponentWorkloadBuilder) BuildConfig() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - if b.Workload == nil { + if b.workload == nil { return nil, fmt.Errorf("build config but workload is nil, cluster: %s, component: %s", - b.Comp.GetClusterName(), b.Comp.GetName()) + b.comp.GetClusterName(), b.comp.GetName()) } err := plan.RenderConfigNScriptFiles( &intctrlutil.ResourceCtx{ - Context: b.ReqCtx.Ctx, - Client: b.Client, - Namespace: b.Comp.GetNamespace(), - ClusterName: b.Comp.GetClusterName(), - ComponentName: b.Comp.GetName(), + Context: b.reqCtx.Ctx, + Client: b.client, + Namespace: b.comp.GetNamespace(), + ClusterName: b.comp.GetClusterName(), + ComponentName: b.comp.GetName(), }, - b.Comp.GetClusterVersion(), - b.Comp.GetCluster(), - b.Comp.GetSynthesizedComponent(), - b.Workload, + b.comp.GetClusterVersion(), + b.comp.GetCluster(), + b.comp.GetSynthesizedComponent(), + b.workload, b.getRuntime(), - b.LocalObjs) + b.localObjs) return nil, err } return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildWorkload4StatefulSet(workloadType string) componentWorkloadBuilder { +func (b *rsmComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - if b.EnvConfig == nil { - return nil, fmt.Errorf("build %s workload but env config is nil, cluster: %s, component: %s", - workloadType, b.Comp.GetClusterName(), b.Comp.GetName()) - } - - sts, err := factory.BuildSts(b.ReqCtx, b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent(), b.EnvConfig.Name) + component := b.comp.GetSynthesizedComponent() + obj, err := factory.BuildRSM(b.reqCtx, b.comp.GetCluster(), component, b.envConfig.Name) if err != nil { return nil, err } - b.Workload = sts + + b.workload = obj return nil, nil // don't return sts here } return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildPDB() componentWorkloadBuilder { +func (b 
*rsmComponentWorkloadBuilder) BuildPDB() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { // if without this handler, the cluster controller will occur error during reconciling. // conditionally build PodDisruptionBudget - synthesizedComponent := b.Comp.GetSynthesizedComponent() + synthesizedComponent := b.comp.GetSynthesizedComponent() if synthesizedComponent.MinAvailable != nil { - pdb := factory.BuildPDB(b.Comp.GetCluster(), synthesizedComponent) + pdb := factory.BuildPDB(b.comp.GetCluster(), synthesizedComponent) return []client.Object{pdb}, nil } else { panic("this shouldn't happen") @@ -133,11 +127,11 @@ func (b *componentWorkloadBuilderBase) BuildPDB() componentWorkloadBuilder { return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildVolumeMount() componentWorkloadBuilder { +func (b *rsmComponentWorkloadBuilder) BuildVolumeMount() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - if b.Workload == nil { + if b.workload == nil { return nil, fmt.Errorf("build volume mount but workload is nil, cluster: %s, component: %s", - b.Comp.GetClusterName(), b.Comp.GetName()) + b.comp.GetClusterName(), b.comp.GetName()) } podSpec := b.getRuntime() @@ -164,30 +158,10 @@ func (b *componentWorkloadBuilderBase) BuildVolumeMount() componentWorkloadBuild return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildService() componentWorkloadBuilder { - buildfn := func() ([]client.Object, error) { - svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - objs := make([]client.Object, 0) - for _, svc := range svcList { - objs = append(objs, svc) - } - return objs, nil - } - return b.BuildWrapper(buildfn) -} - -func (b *componentWorkloadBuilderBase) BuildHeadlessService() componentWorkloadBuilder { - buildfn := func() ([]client.Object, error) { - svc := factory.BuildHeadlessSvc(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - return []client.Object{svc}, 
nil - } - return b.BuildWrapper(buildfn) -} - -func (b *componentWorkloadBuilderBase) BuildTLSCert() componentWorkloadBuilder { +func (b *rsmComponentWorkloadBuilder) BuildTLSCert() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - cluster := b.Comp.GetCluster() - component := b.Comp.GetSynthesizedComponent() + cluster := b.comp.GetCluster() + component := b.comp.GetSynthesizedComponent() if !component.TLS { return nil, nil } @@ -198,7 +172,7 @@ func (b *componentWorkloadBuilderBase) BuildTLSCert() componentWorkloadBuilder { objs := make([]client.Object, 0) switch component.Issuer.Name { case appsv1alpha1.IssuerUserProvided: - if err := plan.CheckTLSSecretRef(b.ReqCtx.Ctx, b.Client, cluster.Namespace, component.Issuer.SecretRef); err != nil { + if err := plan.CheckTLSSecretRef(b.reqCtx.Ctx, b.client, cluster.Namespace, component.Issuer.SecretRef); err != nil { return nil, err } case appsv1alpha1.IssuerKubeBlocks: @@ -207,59 +181,59 @@ func (b *componentWorkloadBuilderBase) BuildTLSCert() componentWorkloadBuilder { return nil, err } objs = append(objs, secret) - b.LocalObjs = append(b.LocalObjs, secret) + b.localObjs = append(b.localObjs, secret) } return objs, nil } return b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) BuildTLSVolume() componentWorkloadBuilder { +func (b *rsmComponentWorkloadBuilder) BuildTLSVolume() componentWorkloadBuilder { buildfn := func() ([]client.Object, error) { - if b.Workload == nil { + if b.workload == nil { return nil, fmt.Errorf("build TLS volumes but workload is nil, cluster: %s, component: %s", - b.Comp.GetClusterName(), b.Comp.GetName()) + b.comp.GetClusterName(), b.comp.GetName()) } // build secret volume and volume mount - return nil, updateTLSVolumeAndVolumeMount(b.getRuntime(), b.Comp.GetClusterName(), *b.Comp.GetSynthesizedComponent()) + return nil, updateTLSVolumeAndVolumeMount(b.getRuntime(), b.comp.GetClusterName(), *b.comp.GetSynthesizedComponent()) } return 
b.BuildWrapper(buildfn) } -func (b *componentWorkloadBuilderBase) Complete() error { - if b.Error != nil { - return b.Error +func (b *rsmComponentWorkloadBuilder) Complete() error { + if b.error != nil { + return b.error } - if b.Workload == nil { + if b.workload == nil { return fmt.Errorf("fail to create component workloads, cluster: %s, component: %s", - b.Comp.GetClusterName(), b.Comp.GetName()) + b.comp.GetClusterName(), b.comp.GetName()) } - b.Comp.SetWorkload(b.Workload, b.DefaultAction, nil) + b.comp.setWorkload(b.workload, b.defaultAction, nil) return nil } -func (b *componentWorkloadBuilderBase) BuildWrapper(buildfn func() ([]client.Object, error)) componentWorkloadBuilder { - if b.Error != nil || buildfn == nil { - return b.ConcreteBuilder +func (b *rsmComponentWorkloadBuilder) BuildWrapper(buildfn func() ([]client.Object, error)) componentWorkloadBuilder { + if b.error != nil || buildfn == nil { + return b } objs, err := buildfn() if err != nil { - b.Error = err + b.error = err } else { - cluster := b.Comp.GetCluster() - component := b.Comp.GetSynthesizedComponent() + cluster := b.comp.GetCluster() + component := b.comp.GetSynthesizedComponent() if err = updateCustomLabelToObjs(cluster.Name, string(cluster.UID), component.Name, component.CustomLabelSpecs, objs); err != nil { - b.Error = err + b.error = err } for _, obj := range objs { - b.Comp.AddResource(obj, b.DefaultAction, nil) + b.comp.addResource(obj, b.defaultAction, nil) } } - return b.ConcreteBuilder + return b } -func (b *componentWorkloadBuilderBase) getRuntime() *corev1.PodSpec { - switch w := b.Workload.(type) { +func (b *rsmComponentWorkloadBuilder) getRuntime() *corev1.PodSpec { + switch w := b.workload.(type) { case *appsv1.StatefulSet: return &w.Spec.Template.Spec case *appsv1.Deployment: diff --git a/controllers/apps/components/consensus.go b/controllers/apps/components/consensus.go deleted file mode 100644 index 7744d821f17..00000000000 --- a/controllers/apps/components/consensus.go 
+++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -func newConsensusComponent(cli client.Client, - recorder record.EventRecorder, - cluster *appsv1alpha1.Cluster, - clusterVersion *appsv1alpha1.ClusterVersion, - synthesizedComponent *component.SynthesizedComponent, - dag *graph.DAG) *consensusComponent { - comp := &consensusComponent{ - statefulComponentBase: statefulComponentBase{ - componentBase: componentBase{ - Client: cli, - Recorder: recorder, - Cluster: cluster, - ClusterVersion: clusterVersion, - Component: synthesizedComponent, - ComponentSet: &consensusSet{ - stateful: stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - ComponentSpec: nil, - ComponentDef: nil, - }, - }, - }, - Dag: dag, - WorkloadVertex: nil, - }, - }, - } - return comp -} - -type 
consensusComponent struct { - statefulComponentBase -} - -var _ Component = &consensusComponent{} - -func (c *consensusComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, - action *ictrltypes.LifecycleAction) componentWorkloadBuilder { - builder := &consensusComponentWorkloadBuilder{ - componentWorkloadBuilderBase: componentWorkloadBuilderBase{ - ReqCtx: reqCtx, - Client: cli, - Comp: c, - DefaultAction: action, - Error: nil, - EnvConfig: nil, - Workload: nil, - }, - } - builder.ConcreteBuilder = builder - return builder -} - -func (c *consensusComponent) GetWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Consensus -} - -func (c *consensusComponent) GetBuiltObjects(reqCtx intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) { - return c.statefulComponentBase.GetBuiltObjects(c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *consensusComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Create(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *consensusComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Update(reqCtx, cli, c.newBuilder(reqCtx, cli, nil)) -} - -func (c *consensusComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Status(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr())) -} diff --git a/controllers/apps/components/consensus_set.go b/controllers/apps/components/consensus_set.go deleted file mode 100644 index 4feeae186ce..00000000000 --- a/controllers/apps/components/consensus_set.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free 
Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "context" - - "github.com/google/go-cmp/cmp" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -type consensusSet struct { - stateful -} - -var _ componentSet = &consensusSet{} - -func (r *consensusSet) getName() string { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Name - } - return r.ComponentSpec.Name -} - -func (r *consensusSet) getDefName() string { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.ComponentDef - } - return r.ComponentDef.Name -} - -func (r *consensusSet) getWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Consensus -} - -func (r *consensusSet) getReplicas() int32 { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Replicas - } - return r.ComponentSpec.Replicas -} - -func (r *consensusSet) getConsensusSpec() *appsv1alpha1.ConsensusSetSpec { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.ConsensusSpec - } - return r.ComponentDef.ConsensusSpec -} - -func (r *consensusSet) getProbes() *appsv1alpha1.ClusterDefinitionProbes { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Probes - } - return r.ComponentDef.Probes -} - -func (r 
*consensusSet) IsRunning(ctx context.Context, obj client.Object) (bool, error) { - if obj == nil { - return false, nil - } - sts := convertToStatefulSet(obj) - isRevisionConsistent, err := isStsAndPodsRevisionConsistent(ctx, r.Cli, sts) - if err != nil { - return false, err - } - pods, err := GetPodListByStatefulSet(ctx, r.Cli, sts) - if err != nil { - return false, err - } - for _, pod := range pods { - if !intctrlutil.PodIsReadyWithLabel(pod) { - return false, nil - } - } - - targetReplicas := r.getReplicas() - return statefulSetOfComponentIsReady(sts, isRevisionConsistent, &targetReplicas), nil -} - -func (r *consensusSet) PodsReady(ctx context.Context, obj client.Object) (bool, error) { - return r.stateful.PodsReady(ctx, obj) -} - -func (r *consensusSet) PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool { - if pod == nil { - return false - } - return intctrlutil.PodIsReadyWithLabel(*pod) -} - -func (r *consensusSet) GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap) { - var ( - isAbnormal bool - isFailed = true - statusMessages appsv1alpha1.ComponentMessageMap - ) - compStatus, ok := r.Cluster.Status.Components[r.getName()] - if !ok || compStatus.PodsReadyTime == nil { - return "", nil - } - if !isProbeTimeout(r.getProbes(), compStatus.PodsReadyTime) { - return "", nil - } - for _, pod := range pods { - role := pod.Labels[constant.RoleLabelKey] - if role == r.getConsensusSpec().Leader.Name { - isFailed = false - } - if role == "" { - isAbnormal = true - statusMessages.SetObjectMessage(pod.Kind, pod.Name, "Role probe timeout, check whether the application is available") - } - // TODO clear up the message of ready pod in component.message. 
- } - if isFailed { - return appsv1alpha1.FailedClusterCompPhase, statusMessages - } - if isAbnormal { - return appsv1alpha1.AbnormalClusterCompPhase, statusMessages - } - return "", statusMessages -} - -func (r *consensusSet) GetPhaseWhenPodsNotReady(ctx context.Context, - componentName string, - originPhaseIsUpRunning bool) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - stsList := &appsv1.StatefulSetList{} - podList, err := getCompRelatedObjectList(ctx, r.Cli, *r.Cluster, - componentName, stsList) - if err != nil || len(stsList.Items) == 0 { - return "", nil, err - } - stsObj := stsList.Items[0] - podCount := len(podList.Items) - componentReplicas := r.getReplicas() - if podCount == 0 || stsObj.Status.AvailableReplicas == 0 { - return getPhaseWithNoAvailableReplicas(componentReplicas), nil, nil - } - // get the statefulSet of component - var ( - existLatestRevisionFailedPod bool - leaderIsReady bool - consensusSpec = r.getConsensusSpec() - statusMessages = appsv1alpha1.ComponentMessageMap{} - ) - for _, v := range podList.Items { - // if the pod is terminating, ignore it - if v.DeletionTimestamp != nil { - return "", nil, nil - } - labelValue := v.Labels[constant.RoleLabelKey] - if consensusSpec != nil && labelValue == consensusSpec.Leader.Name && intctrlutil.PodIsReady(&v) { - leaderIsReady = true - continue - } - // if component is up running but pod is not ready, this pod should be failed. - // for example: full disk cause readiness probe failed and serve is not available. - // but kubelet only sets the container is not ready and pod is also Running. 
- if originPhaseIsUpRunning && !intctrlutil.PodIsReady(&v) && intctrlutil.PodIsControlledByLatestRevision(&v, &stsObj) { - existLatestRevisionFailedPod = true - continue - } - isFailed, _, message := IsPodFailedAndTimedOut(&v) - if isFailed && intctrlutil.PodIsControlledByLatestRevision(&v, &stsObj) { - existLatestRevisionFailedPod = true - statusMessages.SetObjectMessage(v.Kind, v.Name, message) - } - } - return getCompPhaseByConditions(existLatestRevisionFailedPod, leaderIsReady, - componentReplicas, int32(podCount), stsObj.Status.AvailableReplicas), statusMessages, nil -} - -func (r *consensusSet) HandleRestart(ctx context.Context, obj client.Object) ([]graph.Vertex, error) { - if r.getWorkloadType() != appsv1alpha1.Consensus { - return nil, nil - } - priorityMapperFn := func(component *appsv1alpha1.ClusterComponentDefinition) map[string]int { - return ComposeRolePriorityMap(component.ConsensusSpec) - } - return r.HandleUpdateWithStrategy(ctx, obj, nil, priorityMapperFn, generateConsensusSerialPlan, generateConsensusBestEffortParallelPlan, generateConsensusParallelPlan) -} - -// HandleRoleChange is the implementation of the type Component interface method, which is used to handle the role change of the Consensus workload. 
-func (r *consensusSet) HandleRoleChange(ctx context.Context, obj client.Object) ([]graph.Vertex, error) { - if r.getWorkloadType() != appsv1alpha1.Consensus { - return nil, nil - } - - stsObj := convertToStatefulSet(obj) - pods, err := GetPodListByStatefulSet(ctx, r.Cli, stsObj) - if err != nil { - return nil, err - } - - // update cluster.status.component.consensusSetStatus based on the existences for all pods - componentName := r.getName() - - // first, get the old status - var oldConsensusSetStatus *appsv1alpha1.ConsensusSetStatus - if v, ok := r.Cluster.Status.Components[componentName]; ok { - oldConsensusSetStatus = v.ConsensusSetStatus - } - // create the initial status - newConsensusSetStatus := &appsv1alpha1.ConsensusSetStatus{ - Leader: appsv1alpha1.ConsensusMemberStatus{ - Name: "", - Pod: constant.ComponentStatusDefaultPodName, - AccessMode: appsv1alpha1.None, - }, - } - // then, set the new status - setConsensusSetStatusRoles(newConsensusSetStatus, r.getConsensusSpec(), pods) - // if status changed, do update - if !cmp.Equal(newConsensusSetStatus, oldConsensusSetStatus) { - if err = initClusterComponentStatusIfNeed(r.Cluster, componentName, r.getWorkloadType()); err != nil { - return nil, err - } - componentStatus := r.Cluster.Status.Components[componentName] - componentStatus.ConsensusSetStatus = newConsensusSetStatus - r.Cluster.Status.SetComponentStatus(componentName, componentStatus) - - // TODO: does the update order between cluster and env configmap matter? - - // add consensus role info to pod env - return updateConsensusRoleInfo(ctx, r.Cli, r.Cluster, r.getConsensusSpec(), r.getDefName(), componentName, pods) - } - return nil, nil -} - -// newConsensusSet is the constructor of the type consensusSet. 
-func newConsensusSet(cli client.Client, - cluster *appsv1alpha1.Cluster, - spec *appsv1alpha1.ClusterComponentSpec, - def appsv1alpha1.ClusterComponentDefinition) *consensusSet { - return &consensusSet{ - stateful: stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: nil, - ComponentSpec: spec, - ComponentDef: &def, - }, - }, - } -} diff --git a/controllers/apps/components/consensus_set_test.go b/controllers/apps/components/consensus_set_test.go deleted file mode 100644 index e80c1125a44..00000000000 --- a/controllers/apps/components/consensus_set_test.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("Consensus Component", func() { - var ( - randomStr = testCtx.GetRandomStr() - clusterDefName = "mysql-clusterdef-" + randomStr - clusterVersionName = "mysql-clusterversion-" + randomStr - clusterName = "mysql-" + randomStr - ) - - const ( - consensusCompName = "consensus" - defaultMinReadySeconds int32 = 10 - revisionID = "6fdd48d9cd" - ) - - cleanAll := func() { - // must wait until resources deleted and no longer exist before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - mockClusterStatusProbeTimeout := func(cluster *appsv1alpha1.Cluster) { - // mock pods ready in component status and probe timed out - Expect(testapps.ChangeObjStatus(&testCtx, cluster, func() { - podsReady := true - cluster.Status.Components = map[string]appsv1alpha1.ClusterComponentStatus{ - consensusCompName: { - PodsReady: &podsReady, - PodsReadyTime: &metav1.Time{Time: time.Now().Add(-10 * time.Minute)}, - }, - } - })).ShouldNot(HaveOccurred()) - - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(cluster), func(g Gomega, tmpCluster *appsv1alpha1.Cluster) { - g.Expect(tmpCluster.Status.Components).ShouldNot(BeEmpty()) - })).Should(Succeed()) - } - - Context("Consensus Component test", func() { - It("Consensus Component test", func() { - By(" init cluster, statefulSet, pods") - clusterDef, _, cluster := testapps.InitConsensusMysql(&testCtx, clusterDefName, - clusterVersionName, clusterName, "consensus", consensusCompName) - - sts := testapps.MockConsensusComponentStatefulSet(&testCtx, clusterName, consensusCompName) - componentName := consensusCompName - compDefName := cluster.Spec.GetComponentDefRefName(componentName) - componentDef := clusterDef.GetComponentDefByName(compDefName) - component := cluster.Spec.GetComponentByName(componentName) - - By("test pods are not ready") - consensusComponent := newConsensusSet(k8sClient, cluster, component, *componentDef) - sts.Status.AvailableReplicas = 
*sts.Spec.Replicas - 1 - podsReady, _ := consensusComponent.PodsReady(ctx, sts) - Expect(podsReady).Should(BeFalse()) - - By("test pods are ready") - // mock sts is ready - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - controllerRevision := fmt.Sprintf("%s-%s-%s", clusterName, consensusCompName, revisionID) - sts.Status.CurrentRevision = controllerRevision - sts.Status.UpdateRevision = controllerRevision - testk8s.MockStatefulSetReady(sts) - })).Should(Succeed()) - - podsReady, _ = consensusComponent.PodsReady(ctx, sts) - Expect(podsReady).Should(BeTrue()) - - By("test component is running") - isRunning, _ := consensusComponent.IsRunning(ctx, sts) - Expect(isRunning).Should(BeFalse()) - - podName := sts.Name + "-0" - podList := testapps.MockConsensusComponentPods(&testCtx, sts, clusterName, consensusCompName) - By("expect for pod is available") - Expect(consensusComponent.PodIsAvailable(podList[0], defaultMinReadySeconds)).Should(BeTrue()) - - By("test handle probe timed out") - mockClusterStatusProbeTimeout(cluster) - testk8s.DeletePodLabelKey(ctx, testCtx, podName, constant.RoleLabelKey) - pod := &corev1.Pod{} - Expect(testCtx.Cli.Get(ctx, client.ObjectKey{Name: podName, Namespace: testCtx.DefaultNamespace}, pod)).Should(Succeed()) - phase, _ := consensusComponent.GetPhaseWhenPodsReadyAndProbeTimeout([]*corev1.Pod{pod}) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("test component is running") - isRunning, _ = consensusComponent.IsRunning(ctx, sts) - Expect(isRunning).Should(BeFalse()) - - By("should return empty string if pod of component is only not ready when component is not up running") - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{} - })).Should(Succeed()) - phase, _, _ = consensusComponent.GetPhaseWhenPodsNotReady(ctx, consensusCompName, false) - Expect(string(phase)).Should(Equal("")) - - By("expect component phase is Failed when pod of component is not 
ready and component is up running") - phase, _, _ = consensusComponent.GetPhaseWhenPodsNotReady(ctx, consensusCompName, true) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("expect component phase is Failed when pod of component is failed") - testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, podName, testCtx.DefaultNamespace) - phase, _, _ = consensusComponent.GetPhaseWhenPodsNotReady(ctx, consensusCompName, false) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("unready pod is not controlled by latest revision, should return empty string") - // mock pod is not controlled by latest revision - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - sts.Status.UpdateRevision = fmt.Sprintf("%s-%s-%s", clusterName, consensusCompName, "6fdd48d9cd1") - })).Should(Succeed()) - phase, _, _ = consensusComponent.GetPhaseWhenPodsNotReady(ctx, consensusCompName, false) - Expect(string(phase)).Should(Equal("")) - }) - }) -}) diff --git a/controllers/apps/components/consensus_set_utils.go b/controllers/apps/components/consensus_set_utils.go deleted file mode 100644 index e7cf775d2ab..00000000000 --- a/controllers/apps/components/consensus_set_utils.go +++ /dev/null @@ -1,425 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "context" - "sort" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete - -type consensusRole string - -type consensusMemberExt struct { - name string - consensusRole consensusRole - accessMode appsv1alpha1.AccessMode - podName string -} - -const ( - roleLeader consensusRole = "Leader" - roleFollower consensusRole = "Follower" - roleLearner consensusRole = "Learner" -) - -const ( - leaderPriority = 1 << 5 - followerReadWritePriority = 1 << 4 - followerReadonlyPriority = 1 << 3 - followerNonePriority = 1 << 2 - learnerPriority = 1 << 1 - emptyConsensusPriority = 1 << 0 - // unknownPriority = 0 -) - -// unknown & empty & learner & 1/2 followers -> 1/2 followers -> leader -func generateConsensusBestEffortParallelPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - // append unknown, empty and learner - index := 0 - for _, pod := range pods { - role := pod.Labels[constant.RoleLabelKey] - if rolePriorityMap[role] <= learnerPriority { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - index++ - } - } - if len(start.NextSteps) > 0 { - start = start.NextSteps[0] - } - // append 1/2 followers - podList := pods[index:] - followerCount := 0 - for _, pod := range podList { - if rolePriorityMap[pod.Labels[constant.RoleLabelKey]] < leaderPriority { - followerCount++ - } - } - end := followerCount / 2 - for i := 0; i < end; i++ { - nextStep := &Step{} - nextStep.Obj = podList[i] - 
start.NextSteps = append(start.NextSteps, nextStep) - } - - if len(start.NextSteps) > 0 { - start = start.NextSteps[0] - } - // append the other 1/2 followers - podList = podList[end:] - end = followerCount - end - for i := 0; i < end; i++ { - nextStep := &Step{} - nextStep.Obj = podList[i] - start.NextSteps = append(start.NextSteps, nextStep) - } - - if len(start.NextSteps) > 0 { - start = start.NextSteps[0] - } - // append leader - podList = podList[end:] - for _, pod := range podList { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - } -} - -// unknown & empty & leader & followers & learner -func generateConsensusParallelPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - for _, pod := range pods { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - } -} - -// unknown -> empty -> learner -> followers(none->readonly->readwrite) -> leader -func generateConsensusSerialPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - for _, pod := range pods { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - start = nextStep - } -} - -// ComposeRolePriorityMap generates a priority map based on roles. 
-func ComposeRolePriorityMap(consensusSpec *appsv1alpha1.ConsensusSetSpec) map[string]int { - if consensusSpec == nil { - consensusSpec = appsv1alpha1.NewConsensusSetSpec() - } - rolePriorityMap := make(map[string]int, 0) - rolePriorityMap[""] = emptyConsensusPriority - rolePriorityMap[consensusSpec.Leader.Name] = leaderPriority - if consensusSpec.Learner != nil { - rolePriorityMap[consensusSpec.Learner.Name] = learnerPriority - } - for _, follower := range consensusSpec.Followers { - switch follower.AccessMode { - case appsv1alpha1.None: - rolePriorityMap[follower.Name] = followerNonePriority - case appsv1alpha1.Readonly: - rolePriorityMap[follower.Name] = followerReadonlyPriority - case appsv1alpha1.ReadWrite: - rolePriorityMap[follower.Name] = followerReadWritePriority - } - } - return rolePriorityMap -} - -// UpdateConsensusSetRoleLabel updates pod role label when internal container role changed -func UpdateConsensusSetRoleLabel(cli client.Client, - reqCtx intctrlutil.RequestCtx, - event *corev1.Event, - componentDef *appsv1alpha1.ClusterComponentDefinition, - pod *corev1.Pod, role string) error { - if componentDef == nil { - return nil - } - return updateConsensusSetRoleLabel(cli, reqCtx, event, componentDef.ConsensusSpec, pod, role) -} - -func updateConsensusSetRoleLabel(cli client.Client, - reqCtx intctrlutil.RequestCtx, - event *corev1.Event, - consensusSpec *appsv1alpha1.ConsensusSetSpec, - pod *corev1.Pod, role string) error { - ctx := reqCtx.Ctx - roleMap := composeConsensusRoleMap(consensusSpec) - // role not defined in CR, ignore it - if _, ok := roleMap[role]; !ok { - return nil - } - - // update pod role label - patch := client.MergeFrom(pod.DeepCopy()) - pod.Labels[constant.RoleLabelKey] = role - pod.Labels[constant.ConsensusSetAccessModeLabelKey] = string(roleMap[role].accessMode) - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] = 
event.EventTime.Time.Format(time.RFC3339Nano) - return cli.Patch(ctx, pod, patch) -} - -func putConsensusMemberExt(roleMap map[string]consensusMemberExt, name string, role consensusRole, accessMode appsv1alpha1.AccessMode) { - if roleMap == nil { - return - } - - if name == "" || role == "" || accessMode == "" { - return - } - - memberExt := consensusMemberExt{ - name: name, - consensusRole: role, - accessMode: accessMode, - } - - roleMap[name] = memberExt -} - -func composeConsensusRoleMap(consensusSpec *appsv1alpha1.ConsensusSetSpec) map[string]consensusMemberExt { - roleMap := make(map[string]consensusMemberExt, 0) - putConsensusMemberExt(roleMap, - consensusSpec.Leader.Name, - roleLeader, - consensusSpec.Leader.AccessMode) - - for _, follower := range consensusSpec.Followers { - putConsensusMemberExt(roleMap, - follower.Name, - roleFollower, - follower.AccessMode) - } - - if consensusSpec.Learner != nil { - putConsensusMemberExt(roleMap, - consensusSpec.Learner.Name, - roleLearner, - consensusSpec.Learner.AccessMode) - } - - return roleMap -} - -func setConsensusSetStatusLeader(consensusSetStatus *appsv1alpha1.ConsensusSetStatus, memberExt consensusMemberExt) bool { - if consensusSetStatus.Leader.Pod == memberExt.podName { - return false - } - consensusSetStatus.Leader.Pod = memberExt.podName - consensusSetStatus.Leader.AccessMode = memberExt.accessMode - consensusSetStatus.Leader.Name = memberExt.name - return true -} - -func setConsensusSetStatusFollower(consensusSetStatus *appsv1alpha1.ConsensusSetStatus, memberExt consensusMemberExt) bool { - for _, member := range consensusSetStatus.Followers { - if member.Pod == memberExt.podName { - return false - } - } - member := appsv1alpha1.ConsensusMemberStatus{ - Pod: memberExt.podName, - AccessMode: memberExt.accessMode, - Name: memberExt.name, - } - consensusSetStatus.Followers = append(consensusSetStatus.Followers, member) - sort.SliceStable(consensusSetStatus.Followers, func(i, j int) bool { - fi := 
consensusSetStatus.Followers[i] - fj := consensusSetStatus.Followers[j] - return strings.Compare(fi.Pod, fj.Pod) < 0 - }) - return true -} - -func setConsensusSetStatusLearner(consensusSetStatus *appsv1alpha1.ConsensusSetStatus, memberExt consensusMemberExt) bool { - if consensusSetStatus.Learner == nil { - consensusSetStatus.Learner = &appsv1alpha1.ConsensusMemberStatus{} - } - if consensusSetStatus.Learner.Pod == memberExt.podName { - return false - } - consensusSetStatus.Learner.Pod = memberExt.podName - consensusSetStatus.Learner.AccessMode = memberExt.accessMode - consensusSetStatus.Learner.Name = memberExt.name - return true -} - -func resetConsensusSetStatusRole(consensusSetStatus *appsv1alpha1.ConsensusSetStatus, podName string) { - // reset leader - if consensusSetStatus.Leader.Pod == podName { - consensusSetStatus.Leader.Pod = constant.ComponentStatusDefaultPodName - consensusSetStatus.Leader.AccessMode = appsv1alpha1.None - consensusSetStatus.Leader.Name = "" - } - - // reset follower - for index, member := range consensusSetStatus.Followers { - if member.Pod == podName { - consensusSetStatus.Followers = append(consensusSetStatus.Followers[:index], consensusSetStatus.Followers[index+1:]...) 
- } - } - - // reset learner - if consensusSetStatus.Learner != nil && consensusSetStatus.Learner.Pod == podName { - consensusSetStatus.Learner = nil - } -} - -func setConsensusSetStatusRoles( - consensusSetStatus *appsv1alpha1.ConsensusSetStatus, - consensusSpec *appsv1alpha1.ConsensusSetSpec, - pods []corev1.Pod) { - for _, pod := range pods { - if !intctrlutil.PodIsReadyWithLabel(pod) { - continue - } - - role := pod.Labels[constant.RoleLabelKey] - _ = setConsensusSetStatusRole(consensusSetStatus, consensusSpec, role, pod.Name) - } -} - -func setConsensusSetStatusRole( - consensusSetStatus *appsv1alpha1.ConsensusSetStatus, - consensusSpec *appsv1alpha1.ConsensusSetSpec, - role, podName string) bool { - // mapping role label to consensus member - roleMap := composeConsensusRoleMap(consensusSpec) - memberExt, ok := roleMap[role] - if !ok { - return false - } - memberExt.podName = podName - resetConsensusSetStatusRole(consensusSetStatus, memberExt.podName) - // update cluster.status - needUpdate := false - switch memberExt.consensusRole { - case roleLeader: - needUpdate = setConsensusSetStatusLeader(consensusSetStatus, memberExt) - case roleFollower: - needUpdate = setConsensusSetStatusFollower(consensusSetStatus, memberExt) - case roleLearner: - needUpdate = setConsensusSetStatusLearner(consensusSetStatus, memberExt) - } - return needUpdate -} - -func updateConsensusRoleInfo(ctx context.Context, - cli client.Client, - cluster *appsv1alpha1.Cluster, - consensusSpec *appsv1alpha1.ConsensusSetSpec, - componentName string, - compDefName string, - pods []corev1.Pod) ([]graph.Vertex, error) { - leader, followers := composeRoleEnv(consensusSpec, pods) - ml := client.MatchingLabels{ - constant.AppInstanceLabelKey: cluster.GetName(), - constant.KBAppComponentLabelKey: componentName, - constant.AppConfigTypeLabelKey: "kubeblocks-env", - } - configList := &corev1.ConfigMapList{} - if err := cli.List(ctx, configList, client.InNamespace(cluster.Namespace), ml); err != nil { - 
return nil, err - } - - vertexes := make([]graph.Vertex, 0) - for idx := range configList.Items { - config := configList.Items[idx] - if config.Data == nil { - config.Data = make(map[string]string) - } - - config.Data["KB_LEADER"] = leader - config.Data["KB_FOLLOWERS"] = followers - // TODO: need to deprecate 'compDefName' being part of variable name, as it's redundant - // and introduce env/cm key naming reference complexity - config.Data["KB_"+strings.ToUpper(compDefName)+"_LEADER"] = leader - config.Data["KB_"+strings.ToUpper(compDefName)+"_FOLLOWERS"] = followers - vertexes = append(vertexes, &ictrltypes.LifecycleVertex{ - Obj: &config, - Action: ictrltypes.ActionUpdatePtr(), - }) - } - - // patch pods' annotations - for idx := range pods { - pod := pods[idx] - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - pod.Annotations[constant.LeaderAnnotationKey] = leader - vertexes = append(vertexes, &ictrltypes.LifecycleVertex{ - Obj: &pod, - Action: ictrltypes.ActionUpdatePtr(), - }) - } - - return vertexes, nil -} - -func composeRoleEnv(consensusSpec *appsv1alpha1.ConsensusSetSpec, pods []corev1.Pod) (leader, followers string) { - leader, followers = "", "" - for _, pod := range pods { - if !intctrlutil.PodIsReadyWithLabel(pod) { - continue - } - role := pod.Labels[constant.RoleLabelKey] - // mapping role label to consensus member - roleMap := composeConsensusRoleMap(consensusSpec) - memberExt, ok := roleMap[role] - if !ok { - continue - } - switch memberExt.consensusRole { - case roleLeader: - leader = pod.Name - case roleFollower: - if len(followers) > 0 { - followers += "," - } - followers += pod.Name - case roleLearner: - // TODO: CT - } - } - return -} diff --git a/controllers/apps/components/consensus_set_utils_test.go b/controllers/apps/components/consensus_set_utils_test.go deleted file mode 100644 index 88c1cb8a38c..00000000000 --- a/controllers/apps/components/consensus_set_utils_test.go +++ /dev/null @@ -1,208 +0,0 @@ -/* 
-Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controllerutil" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -func TestIsReady(t *testing.T) { - set := testk8s.NewFakeStatefulSet("foo", 3) - pod := testk8s.NewFakeStatefulSetPod(set, 1) - pod.Status.Conditions = []v1.PodCondition{ - { - Type: v1.PodReady, - Status: v1.ConditionTrue, - }, - } - pod.Labels = map[string]string{constant.RoleLabelKey: "leader"} - if !controllerutil.PodIsReadyWithLabel(*pod) { - t.Errorf("isReady returned false negative") - } -} - -func TestInitClusterComponentStatusIfNeed(t *testing.T) { - componentName := "foo" - cluster := &appsv1alpha1.Cluster{ - Spec: appsv1alpha1.ClusterSpec{ - ComponentSpecs: []appsv1alpha1.ClusterComponentSpec{ - { - Name: componentName, - ComponentDefRef: componentName, - }, - }, - }, - } - if err := initClusterComponentStatusIfNeed(cluster, componentName, appsv1alpha1.Consensus); err != 
nil { - t.Errorf("caught error %v", err) - } - - if len(cluster.Status.Components) == 0 { - t.Errorf("cluster.Status.ComponentDefs[*] not initialized properly") - } - if _, ok := cluster.Status.Components[componentName]; !ok { - t.Errorf("cluster.Status.ComponentDefs[componentName] not initialized properly") - } - consensusSetStatus := cluster.Status.Components[componentName].ConsensusSetStatus - if consensusSetStatus == nil { - t.Errorf("cluster.Status.ComponentDefs[componentName].ConsensusSetStatus not initialized properly") - } else if consensusSetStatus.Leader.Name != "" || - consensusSetStatus.Leader.AccessMode != appsv1alpha1.None || - consensusSetStatus.Leader.Pod != constant.ComponentStatusDefaultPodName { - t.Errorf("cluster.Status.ComponentDefs[componentName].ConsensusSetStatus.Leader not initialized properly") - } -} - -func TestGetPodRevision(t *testing.T) { - set := testk8s.NewFakeStatefulSet("foo", 3) - pod := testk8s.NewFakeStatefulSetPod(set, 1) - if controllerutil.GetPodRevision(pod) != "" { - t.Errorf("revision should be empty") - } - - pod.Labels = make(map[string]string, 0) - pod.Labels[apps.StatefulSetRevisionLabel] = "bar" - - if controllerutil.GetPodRevision(pod) != "bar" { - t.Errorf("revision not matched") - } -} - -func TestSortPods(t *testing.T) { - createMockPods := func(replicas int, stsName string) []v1.Pod { - pods := make([]v1.Pod, replicas) - for i := 0; i < replicas; i++ { - pods[i] = v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: stsName + "-" + strconv.Itoa(i), - Namespace: "default", - Labels: map[string]string{ - constant.RoleLabelKey: "learner", - }, - }, - } - } - return pods - } - randSort := func(pods []v1.Pod) []v1.Pod { - n := len(pods) - newPod := make([]v1.Pod, n) - copy(newPod, pods) - for i := n; i > 0; i-- { - randIndex := rand.Intn(i) - newPod[n-1], newPod[randIndex] = newPod[randIndex], newPod[n-1] - } - return newPod - } - - type args struct { - pods []v1.Pod - rolePriorityMap map[string]int - } - tests := 
[]struct { - name string - args args - want []v1.Pod - wantErr bool - }{{ - name: "test_normal", - args: args{ - rolePriorityMap: map[string]int{ - "learner": 10, - }, - }, - want: createMockPods(8, "for-test"), - wantErr: false, - }, { - name: "badcase", - args: args{ - rolePriorityMap: map[string]int{ - "learner": 10, - }, - }, - want: createMockPods(12, "for-test"), - wantErr: false, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.args.pods = randSort(tt.want) - SortPods(tt.args.pods, tt.args.rolePriorityMap, constant.RoleLabelKey) - if !tt.wantErr { - assert.Equal(t, tt.args.pods, tt.want) - } - }) - } -} - -func TestComposeRoleEnv(t *testing.T) { - componentDef := &appsv1alpha1.ClusterComponentDefinition{ - WorkloadType: appsv1alpha1.Consensus, - ConsensusSpec: &appsv1alpha1.ConsensusSetSpec{ - Leader: appsv1alpha1.ConsensusMember{ - Name: "leader", - AccessMode: appsv1alpha1.ReadWrite, - }, - Followers: []appsv1alpha1.ConsensusMember{ - { - Name: "follower", - AccessMode: appsv1alpha1.Readonly, - }, - }, - }, - } - - set := testk8s.NewFakeStatefulSet("foo", 3) - pods := make([]v1.Pod, 0) - for i := 0; i < 5; i++ { - pod := testk8s.NewFakeStatefulSetPod(set, i) - pod.Status.Conditions = []v1.PodCondition{ - { - Type: v1.PodReady, - Status: v1.ConditionTrue, - }, - } - pod.Labels = map[string]string{constant.RoleLabelKey: "follower"} - pods = append(pods, *pod) - } - pods[0].Labels = map[string]string{constant.RoleLabelKey: "leader"} - leader, followers := composeRoleEnv(componentDef.ConsensusSpec, pods) - assert.Equal(t, "foo-0", leader) - assert.Equal(t, "foo-1,foo-2,foo-3,foo-4", followers) - - dt := time.Now() - pods[3].DeletionTimestamp = &metav1.Time{Time: dt} - pods[4].DeletionTimestamp = &metav1.Time{Time: dt} - leader, followers = composeRoleEnv(componentDef.ConsensusSpec, pods) - assert.Equal(t, "foo-0", leader) - assert.Equal(t, "foo-1,foo-2", followers) -} diff --git a/controllers/apps/components/consensus_workload.go 
b/controllers/apps/components/consensus_workload.go deleted file mode 100644 index ba1e0e579e5..00000000000 --- a/controllers/apps/components/consensus_workload.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/apecloud/kubeblocks/internal/constant" -) - -type consensusComponentWorkloadBuilder struct { - componentWorkloadBuilderBase -} - -var _ componentWorkloadBuilder = &consensusComponentWorkloadBuilder{} - -func (b *consensusComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { - return b.BuildWorkload4StatefulSet("consensus") -} - -func (b *consensusComponentWorkloadBuilder) BuildService() componentWorkloadBuilder { - buildfn := func() ([]client.Object, error) { - svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - objs := make([]client.Object, 0, len(svcList)) - leader := b.Comp.GetConsensusSpec().Leader - for _, svc := range svcList { - if len(leader.Name) > 0 { - svc.Spec.Selector[constant.RoleLabelKey] = leader.Name - } - objs = append(objs, svc) - } - return objs, nil - } - return b.BuildWrapper(buildfn) -} diff --git a/controllers/apps/components/base_stateful_hscale.go 
b/controllers/apps/components/hscale_volume_populator.go similarity index 100% rename from controllers/apps/components/base_stateful_hscale.go rename to controllers/apps/components/hscale_volume_populator.go diff --git a/controllers/apps/components/plan.go b/controllers/apps/components/plan.go deleted file mode 100644 index f7d5791c2bb..00000000000 --- a/controllers/apps/components/plan.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -type Plan struct { - Start *Step - WalkFunc walkFunc -} - -type Step struct { - Obj interface{} - NextSteps []*Step -} - -type walkFunc func(obj interface{}) (bool, error) - -// WalkOneStep process plan stepping -// @return isCompleted -// @return err -func (p *Plan) WalkOneStep() (bool, error) { - if p == nil { - return true, nil - } - - if len(p.Start.NextSteps) == 0 { - return true, nil - } - - shouldStop := false - for _, step := range p.Start.NextSteps { - walked, err := p.WalkFunc(step.Obj) - if err != nil { - return false, err - } - if walked { - shouldStop = true - } - } - if shouldStop { - return false, nil - } - - // generate new plan - plan := &Plan{} - plan.Start = &Step{} - plan.WalkFunc = p.WalkFunc - plan.Start.NextSteps = make([]*Step, 0) - for _, step := range p.Start.NextSteps { - for _, nextStep := range step.NextSteps { - if !containStep(plan.Start.NextSteps, nextStep) { - plan.Start.NextSteps = append(plan.Start.NextSteps, nextStep) - } - } - } - return plan.WalkOneStep() -} - -func containStep(steps []*Step, step *Step) bool { - for _, s := range steps { - if s == step { - return true - } - } - return false -} diff --git a/controllers/apps/components/plan_test.go b/controllers/apps/components/plan_test.go deleted file mode 100644 index 628064220c0..00000000000 --- a/controllers/apps/components/plan_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. 
- -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "testing" -) - -func TestWalkOneStep(t *testing.T) { - plan := &Plan{} - plan.Start = &Step{} - plan.Start.NextSteps = make([]*Step, 1) - plan.WalkFunc = func(obj interface{}) (bool, error) { - currentPos := obj.(int) - if currentPos == 2 { - return true, nil - } - - return false, nil - } - - step1 := &Step{} - step1.Obj = 1 - step1.NextSteps = make([]*Step, 1) - plan.Start.NextSteps[0] = step1 - - step2 := &Step{} - step2.Obj = 2 - step1.NextSteps[0] = step2 - - end, err := plan.WalkOneStep() - if err != nil { - t.Errorf("walk error: %v", err) - } - if end { - t.Errorf("walk should not end") - } - - step2.Obj = 3 - - end, err = plan.WalkOneStep() - - if err != nil { - t.Errorf("walk error: %v", err) - } - if !end { - t.Errorf("walk should end") - } - -} diff --git a/controllers/apps/components/replication.go b/controllers/apps/components/replication.go deleted file mode 100644 index 4a98e1cf998..00000000000 --- a/controllers/apps/components/replication.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -func newReplicationComponent(cli client.Client, - recorder record.EventRecorder, - cluster *appsv1alpha1.Cluster, - clusterVersion *appsv1alpha1.ClusterVersion, - synthesizedComponent *component.SynthesizedComponent, - dag *graph.DAG) *replicationComponent { - comp := &replicationComponent{ - statefulComponentBase: statefulComponentBase{ - componentBase: componentBase{ - Client: cli, - Recorder: recorder, - Cluster: cluster, - ClusterVersion: clusterVersion, - Component: synthesizedComponent, - ComponentSet: &replicationSet{ - stateful: stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - ComponentSpec: nil, - ComponentDef: nil, - }, - }, - }, - Dag: dag, - WorkloadVertex: nil, - }, - }, - } - return comp -} - -type replicationComponent struct { - statefulComponentBase -} - -var _ Component = &replicationComponent{} - -func (c *replicationComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, - action *ictrltypes.LifecycleAction) componentWorkloadBuilder { - builder := &replicationComponentWorkloadBuilder{ - componentWorkloadBuilderBase: componentWorkloadBuilderBase{ - ReqCtx: reqCtx, - Client: cli, - Comp: c, - DefaultAction: action, - Error: nil, - EnvConfig: nil, - Workload: nil, - }, - } - builder.ConcreteBuilder = builder - return builder -} - -func (c *replicationComponent) GetWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Replication -} - -func (c *replicationComponent) GetBuiltObjects(reqCtx 
intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) { - return c.statefulComponentBase.GetBuiltObjects(c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *replicationComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Create(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *replicationComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Update(reqCtx, cli, c.newBuilder(reqCtx, cli, nil)) -} - -func (c *replicationComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Status(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr())) -} diff --git a/controllers/apps/components/replication_set.go b/controllers/apps/components/replication_set.go deleted file mode 100644 index b3f922c21de..00000000000 --- a/controllers/apps/components/replication_set.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -const ( - emptyReplicationPriority = iota - secondaryPriority - primaryPriority -) - -// replicationSet is a component object used by Cluster, ClusterComponentDefinition and ClusterComponentSpec -type replicationSet struct { - stateful -} - -var _ componentSet = &replicationSet{} - -func (r *replicationSet) getName() string { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Name - } - return r.ComponentSpec.Name -} - -func (r *replicationSet) getWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Replication -} - -func (r *replicationSet) getReplicas() int32 { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Replicas - } - return r.ComponentSpec.Replicas -} - -// IsRunning is the implementation of the type Component interface method, -// which is used to check whether the replicationSet component is running normally. 
-func (r *replicationSet) IsRunning(ctx context.Context, obj client.Object) (bool, error) { - var componentStatusIsRunning = true - sts := convertToStatefulSet(obj) - isRevisionConsistent, err := isStsAndPodsRevisionConsistent(ctx, r.Cli, sts) - if err != nil { - return false, err - } - stsIsReady := statefulSetOfComponentIsReady(sts, isRevisionConsistent, nil) - if !stsIsReady { - return false, nil - } - if sts.Status.AvailableReplicas < r.getReplicas() { - componentStatusIsRunning = false - } - return componentStatusIsRunning, nil -} - -// PodsReady is the implementation of the type Component interface method, -// which is used to check whether all the pods of replicationSet component are ready. -func (r *replicationSet) PodsReady(ctx context.Context, obj client.Object) (bool, error) { - return r.stateful.PodsReady(ctx, obj) -} - -// PodIsAvailable is the implementation of the type Component interface method, -// Check whether the status of a Pod of the replicationSet is ready, including the role label on the Pod -func (r *replicationSet) PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool { - if pod == nil { - return false - } - return intctrlutil.PodIsReadyWithLabel(*pod) -} - -func (r *replicationSet) GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap) { - return "", nil -} - -// GetPhaseWhenPodsNotReady is the implementation of the type Component interface method, -// when the pods of replicationSet are not ready, calculate the component phase is Failed or Abnormal. -// if return an empty phase, means the pods of component are ready and skips it. 
-func (r *replicationSet) GetPhaseWhenPodsNotReady(ctx context.Context, - componentName string, - originPhaseIsUpRunning bool) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - stsList := &appsv1.StatefulSetList{} - podList, err := getCompRelatedObjectList(ctx, r.Cli, *r.Cluster, - componentName, stsList) - if err != nil || len(stsList.Items) == 0 { - return "", nil, err - } - stsObj := stsList.Items[0] - podCount := len(podList.Items) - componentReplicas := r.getReplicas() - if podCount == 0 || stsObj.Status.AvailableReplicas == 0 { - return getPhaseWithNoAvailableReplicas(componentReplicas), nil, nil - } - // get the statefulSet of component - var ( - existLatestRevisionFailedPod bool - primaryIsReady bool - statusMessages = appsv1alpha1.ComponentMessageMap{} - ) - for _, v := range podList.Items { - // if the pod is terminating, ignore it - if v.DeletionTimestamp != nil { - return "", nil, nil - } - labelValue := v.Labels[constant.RoleLabelKey] - if labelValue == constant.Primary && intctrlutil.PodIsReady(&v) { - primaryIsReady = true - continue - } - if labelValue == "" { - statusMessages.SetObjectMessage(v.Kind, v.Name, "empty label for pod, please check.") - } - // if component is up running but pod is not ready, this pod should be failed. - // for example: full disk cause readiness probe failed and serve is not available. - // but kubelet only sets the container is not ready and pod is also Running. 
- if originPhaseIsUpRunning && !intctrlutil.PodIsReady(&v) && intctrlutil.PodIsControlledByLatestRevision(&v, &stsObj) { - existLatestRevisionFailedPod = true - continue - } - isFailed, _, message := IsPodFailedAndTimedOut(&v) - if isFailed && intctrlutil.PodIsControlledByLatestRevision(&v, &stsObj) { - existLatestRevisionFailedPod = true - statusMessages.SetObjectMessage(v.Kind, v.Name, message) - } - } - return getCompPhaseByConditions(existLatestRevisionFailedPod, primaryIsReady, - componentReplicas, int32(podCount), stsObj.Status.AvailableReplicas), statusMessages, nil -} - -// HandleRestart is the implementation of the type Component interface method, which is used to handle the restart of the Replication workload. -func (r *replicationSet) HandleRestart(ctx context.Context, obj client.Object) ([]graph.Vertex, error) { - if r.getWorkloadType() != appsv1alpha1.Replication { - return nil, nil - } - priorityMapperFn := func(component *appsv1alpha1.ClusterComponentDefinition) map[string]int { - return composeReplicationRolePriorityMap() - } - return r.HandleUpdateWithStrategy(ctx, obj, nil, priorityMapperFn, generateReplicationSerialPlan, generateReplicationBestEffortParallelPlan, generateReplicationParallelPlan) -} - -// HandleRoleChange is the implementation of the type Component interface method, which is used to handle the role change of the Replication workload. 
-func (r *replicationSet) HandleRoleChange(ctx context.Context, obj client.Object) ([]graph.Vertex, error) { - podList, err := getRunningPods(ctx, r.Cli, obj) - if err != nil { - return nil, err - } - if len(podList) == 0 { - return nil, nil - } - primaryPods := make([]string, 0) - emptyRolePods := make([]string, 0) - vertexes := make([]graph.Vertex, 0) - for _, pod := range podList { - role, ok := pod.Labels[constant.RoleLabelKey] - if !ok || role == "" { - emptyRolePods = append(emptyRolePods, pod.Name) - continue - } - if role == constant.Primary { - primaryPods = append(primaryPods, pod.Name) - } - } - - for i := range podList { - pod := &podList[i] - needUpdate := false - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - switch { - case len(emptyRolePods) == len(podList): - // if the workload is newly created, and the role label is not set, we set the pod with index=0 as the primary by default. - needUpdate = handlePrimaryNotExistPod(pod) - default: - if len(primaryPods) != 1 { - return nil, errors.New(fmt.Sprintf("the number of primary pod is not equal to 1, primary pods: %v, emptyRole pods: %v", primaryPods, emptyRolePods)) - } - needUpdate = handlePrimaryExistPod(pod, primaryPods[0]) - } - if needUpdate { - vertexes = append(vertexes, &ictrltypes.LifecycleVertex{ - Obj: pod, - Action: ictrltypes.ActionPatchPtr(), - }) - } - } - // rebuild cluster.status.components.replicationSet.status - if err := rebuildReplicationSetClusterStatus(r.Cluster, r.getWorkloadType(), r.getName(), podList); err != nil { - return nil, err - } - return vertexes, nil -} - -// handlePrimaryNotExistPod is used to handle the pod which is not exists primary pod. 
-func handlePrimaryNotExistPod(pod *corev1.Pod) bool { - parent, o := ParseParentNameAndOrdinal(pod.Name) - defaultRole := DefaultRole(o) - pod.GetLabels()[constant.RoleLabelKey] = defaultRole - pod.Annotations[constant.PrimaryAnnotationKey] = fmt.Sprintf("%s-%d", parent, 0) - return true -} - -// handlePrimaryExistPod is used to handle the pod which is exists primary pod. -func handlePrimaryExistPod(pod *corev1.Pod, primary string) bool { - needPatch := false - if pod.Name != primary { - role, ok := pod.Labels[constant.RoleLabelKey] - if !ok || role == "" { - pod.GetLabels()[constant.RoleLabelKey] = constant.Secondary - needPatch = true - } - } - pk, ok := pod.Annotations[constant.PrimaryAnnotationKey] - if !ok || pk != primary { - pod.Annotations[constant.PrimaryAnnotationKey] = primary - needPatch = true - } - return needPatch -} - -// DefaultRole is used to get the default role of the Pod of the Replication workload. -func DefaultRole(i int32) string { - role := constant.Secondary - if i == 0 { - role = constant.Primary - } - return role -} - -// newReplicationSet is the constructor of the type replicationSet. 
-func newReplicationSet(cli client.Client, - cluster *appsv1alpha1.Cluster, - spec *appsv1alpha1.ClusterComponentSpec, - def appsv1alpha1.ClusterComponentDefinition) *replicationSet { - return &replicationSet{ - stateful: stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: nil, - ComponentSpec: spec, - ComponentDef: &def, - }, - }, - } -} diff --git a/controllers/apps/components/replication_set_test.go b/controllers/apps/components/replication_set_test.go deleted file mode 100644 index 095db7ceb56..00000000000 --- a/controllers/apps/components/replication_set_test.go +++ /dev/null @@ -1,282 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("Replication Component", func() { - var ( - clusterName = "test-cluster-repl" - clusterDefName = "test-cluster-def-repl" - clusterVersionName = "test-cluster-version-repl" - controllerRivision = "mock-revision" - ) - - var ( - clusterDefObj *appsv1alpha1.ClusterDefinition - clusterVersionObj *appsv1alpha1.ClusterVersion - clusterObj *appsv1alpha1.Cluster - ) - - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - Context("Replication Component test", func() { - It("Replication Component test", func() { - - By("Create a clusterDefinition obj with replication workloadType.") - replicationSpec := &appsv1alpha1.ReplicationSetSpec{ - StatefulSetSpec: appsv1alpha1.StatefulSetSpec{ - UpdateStrategy: appsv1alpha1.SerialStrategy, - }, - } - clusterDefObj = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.ReplicationRedisComponent, testapps.DefaultRedisCompDefName). - AddReplicationSpec(replicationSpec). - Create(&testCtx).GetObject() - - By("Create a clusterVersion obj with replication workloadType.") - clusterVersionObj = testapps.NewClusterVersionFactory(clusterVersionName, clusterDefObj.Name). - AddComponentVersion(testapps.DefaultRedisCompDefName).AddContainerShort(testapps.DefaultRedisContainerName, testapps.DefaultRedisImageName). - Create(&testCtx).GetObject() - - By("Creating a cluster with replication workloadType.") - clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). - AddComponent(testapps.DefaultRedisCompSpecName, testapps.DefaultRedisCompDefName). - SetReplicas(testapps.DefaultReplicationReplicas). 
- Create(&testCtx).GetObject() - - // mock cluster is Running - Expect(testapps.ChangeObjStatus(&testCtx, clusterObj, func() { - clusterObj.Status.Components = map[string]appsv1alpha1.ClusterComponentStatus{ - testapps.DefaultRedisCompSpecName: { - Phase: appsv1alpha1.RunningClusterCompPhase, - }, - } - })).Should(Succeed()) - - By("Creating statefulSet of replication workloadType.") - replicas := int32(2) - status := appsv1.StatefulSetStatus{ - AvailableReplicas: replicas, - ObservedGeneration: 1, - Replicas: replicas, - ReadyReplicas: replicas, - UpdatedReplicas: replicas, - CurrentRevision: controllerRivision, - UpdateRevision: controllerRivision, - } - - replicationSetSts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, - clusterObj.Name+"-"+testapps.DefaultRedisCompSpecName, clusterObj.Name, testapps.DefaultRedisCompSpecName). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). - AddAppInstanceLabel(clusterObj.Name). - AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). - SetReplicas(replicas). 
- Create(&testCtx).GetObject() - stsObjectKey := client.ObjectKey{Name: replicationSetSts.Name, Namespace: testCtx.DefaultNamespace} - - Expect(replicationSetSts.Spec.VolumeClaimTemplates).Should(BeEmpty()) - - compDefName := clusterObj.Spec.GetComponentDefRefName(testapps.DefaultRedisCompSpecName) - componentDef := clusterDefObj.GetComponentDefByName(compDefName) - componentSpec := clusterObj.Spec.GetComponentByName(testapps.DefaultRedisCompSpecName) - replicationComponent := newReplicationSet(k8sClient, clusterObj, componentSpec, *componentDef) - var podList []*corev1.Pod - - for _, availableReplica := range []int32{0, replicas} { - status.AvailableReplicas = availableReplica - replicationSetSts.Status = status - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - - if availableReplica > 0 { - // Create pods of the statefulset - stsPods := testapps.MockReplicationComponentPods(nil, testCtx, replicationSetSts, clusterObj.Name, - testapps.DefaultRedisCompSpecName, map[int32]string{ - 0: constant.Primary, - 1: constant.Secondary, - }) - podList = append(podList, stsPods...) 
- By("Testing pods are ready") - podsReady, _ := replicationComponent.PodsReady(ctx, replicationSetSts) - Expect(podsReady).Should(BeTrue()) - By("Testing component is running") - isRunning, _ := replicationComponent.IsRunning(ctx, replicationSetSts) - Expect(isRunning).Should(BeTrue()) - } else { - podsReady, _ := replicationComponent.PodsReady(ctx, replicationSetSts) - By("Testing pods are not ready") - Expect(podsReady).Should(BeFalse()) - By("Testing component is not running") - isRunning, _ := replicationComponent.IsRunning(ctx, replicationSetSts) - Expect(isRunning).Should(BeFalse()) - } - } - - // TODO(refactor): probe timed-out pod - // By("Testing handle probe timed out") - // requeue, _ := replicationComponent.HandleProbeTimeoutWhenPodsReady(ctx, nil) - // Expect(requeue == false).Should(BeTrue()) - - By("Testing pod is available") - primaryPod := podList[0] - Expect(replicationComponent.PodIsAvailable(primaryPod, 10)).Should(BeTrue()) - - By("should return empty string if pod of component is only not ready when component is not up running") - pod := podList[1] - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{} - })).Should(Succeed()) - status.AvailableReplicas -= 1 - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - phase, _, _ := replicationComponent.GetPhaseWhenPodsNotReady(ctx, testapps.DefaultRedisCompSpecName, false) - Expect(string(phase)).Should(Equal("")) - - By("expect component phase is Abnormal when pod of component is not ready and component is up running") - phase, _, _ = replicationComponent.GetPhaseWhenPodsNotReady(ctx, testapps.DefaultRedisCompSpecName, true) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - - // mock pod label is empty - Expect(testapps.ChangeObj(&testCtx, primaryPod, func(pod *corev1.Pod) { - pod.Labels[constant.RoleLabelKey] = "" - })).Should(Succeed()) - _, statusMessages, _ := 
replicationComponent.GetPhaseWhenPodsNotReady(ctx, testapps.DefaultRedisCompSpecName, false) - Expect(statusMessages[fmt.Sprintf("%s/%s", primaryPod.Kind, primaryPod.Name)]). - Should(ContainSubstring("empty label for pod, please check")) - - // mock primary pod failed - testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, primaryPod.Name, primaryPod.Namespace) - phase, _, _ = replicationComponent.GetPhaseWhenPodsNotReady(ctx, testapps.DefaultRedisCompSpecName, true) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("Checking if the pod is not updated when statefulSet is not updated") - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - vertexes, err := replicationComponent.HandleRestart(ctx, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(0)) - pods, err := GetPodListByStatefulSet(ctx, k8sClient, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(pods)).To(Equal(int(replicas))) - Expect(isStsAndPodsRevisionConsistent(ctx, k8sClient, replicationSetSts)).Should(BeTrue()) - - By("Checking if the pod is deleted when statefulSet is updated and UpdateStrategy is SerialStrategy") - status.UpdateRevision = "new-mock-revision" - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - vertexes, err = replicationComponent.HandleRestart(ctx, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(1)) - Expect(*vertexes[0].(*ictrltypes.LifecycleVertex).Action == ictrltypes.DELETE).To(BeTrue()) - - By("Checking if the pod is deleted when statefulSet is updated and UpdateStrategy is BestEffortParallelStrategy") - Expect(testapps.ChangeObj(&testCtx, clusterDefObj, func(clusterDef *appsv1alpha1.ClusterDefinition) { - clusterDef.Spec.ComponentDefs[0].ReplicationSpec = &appsv1alpha1.ReplicationSetSpec{ - StatefulSetSpec: appsv1alpha1.StatefulSetSpec{ 
- UpdateStrategy: appsv1alpha1.BestEffortParallelStrategy, - }, - } - })).Should(Succeed()) - status.UpdateRevision = "new-mock-revision-2" - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - vertexes, err = replicationComponent.HandleRestart(ctx, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(1)) - Expect(*vertexes[0].(*ictrltypes.LifecycleVertex).Action == ictrltypes.DELETE).To(BeTrue()) - - By("Checking if the pod is deleted when statefulSet is updated and UpdateStrategy is ParallelStrategy") - Expect(testapps.ChangeObj(&testCtx, clusterDefObj, func(clusterDef *appsv1alpha1.ClusterDefinition) { - clusterDef.Spec.ComponentDefs[0].ReplicationSpec = &appsv1alpha1.ReplicationSetSpec{ - StatefulSetSpec: appsv1alpha1.StatefulSetSpec{ - UpdateStrategy: appsv1alpha1.ParallelStrategy, - }, - } - })).Should(Succeed()) - status.UpdateRevision = "new-mock-revision-2" - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - vertexes, err = replicationComponent.HandleRestart(ctx, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(2)) - Expect(*vertexes[0].(*ictrltypes.LifecycleVertex).Action == ictrltypes.DELETE).To(BeTrue()) - - By("Test handleRoleChange when statefulSet Pod with role label but without primary annotation") - Expect(testapps.ChangeObj(&testCtx, primaryPod, func(pod *corev1.Pod) { - pod.Labels[constant.RoleLabelKey] = constant.Primary - })).Should(Succeed()) - status.UpdateRevision = "new-mock-revision-for-role-change" - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - vertexes, err = replicationComponent.HandleRoleChange(ctx, replicationSetSts) - 
Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(int(replicas))) - Expect(*vertexes[0].(*ictrltypes.LifecycleVertex).Action == ictrltypes.PATCH).To(BeTrue()) - - By("Test handleRoleChange when statefulSet h-scale out a new Pod with no role label") - status.Replicas = 3 - status.AvailableReplicas = 3 - status.ReadyReplicas = 3 - testk8s.PatchStatefulSetStatus(&testCtx, replicationSetSts.Name, status) - Expect(testCtx.Cli.Get(testCtx.Ctx, stsObjectKey, replicationSetSts)).Should(Succeed()) - newPodName := fmt.Sprintf("%s-%d", replicationSetSts.Name, 2) - _ = testapps.MockReplicationComponentPod(nil, testCtx, replicationSetSts, clusterObj.Name, testapps.DefaultRedisCompSpecName, newPodName, "") - vertexes, err = replicationComponent.HandleRoleChange(ctx, replicationSetSts) - Expect(err).To(Succeed()) - Expect(len(vertexes)).To(Equal(3)) - Expect(*vertexes[0].(*ictrltypes.LifecycleVertex).Action == ictrltypes.PATCH).To(BeTrue()) - }) - }) -}) diff --git a/controllers/apps/components/replication_set_utils.go b/controllers/apps/components/replication_set_utils.go deleted file mode 100644 index ed117121417..00000000000 --- a/controllers/apps/components/replication_set_utils.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "context" - "fmt" - "time" - - "github.com/google/go-cmp/cmp" - "golang.org/x/exp/slices" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" -) - -// rebuildReplicationSetClusterStatus syncs replicationSet pod status to cluster.status.component[componentName].ReplicationStatus. -func rebuildReplicationSetClusterStatus(cluster *appsv1alpha1.Cluster, - workloadType appsv1alpha1.WorkloadType, compName string, podList []corev1.Pod) error { - if len(podList) == 0 { - return nil - } - - var oldReplicationStatus *appsv1alpha1.ReplicationSetStatus - if v, ok := cluster.Status.Components[compName]; ok { - oldReplicationStatus = v.ReplicationSetStatus - } - - newReplicationStatus := &appsv1alpha1.ReplicationSetStatus{} - if err := genReplicationSetStatus(newReplicationStatus, podList); err != nil { - return err - } - // if status changed, do update - if !cmp.Equal(newReplicationStatus, oldReplicationStatus) { - if err := initClusterComponentStatusIfNeed(cluster, compName, workloadType); err != nil { - return err - } - componentStatus := cluster.Status.Components[compName] - componentStatus.ReplicationSetStatus = newReplicationStatus - cluster.Status.SetComponentStatus(compName, componentStatus) - } - return nil -} - -// genReplicationSetStatus generates ReplicationSetStatus from podList. 
-func genReplicationSetStatus(replicationStatus *appsv1alpha1.ReplicationSetStatus, podList []corev1.Pod) error { - for _, pod := range podList { - role := pod.Labels[constant.RoleLabelKey] - if role == "" { - return fmt.Errorf("pod %s has no role label", pod.Name) - } - switch role { - case constant.Primary: - if replicationStatus.Primary.Pod != "" { - return fmt.Errorf("more than one primary pod found") - } - replicationStatus.Primary.Pod = pod.Name - case constant.Secondary: - replicationStatus.Secondaries = append(replicationStatus.Secondaries, appsv1alpha1.ReplicationMemberStatus{ - Pod: pod.Name, - }) - default: - return fmt.Errorf("unknown role %s", role) - } - } - return nil -} - -// updateObjRoleChangedInfo updates the value of the role label and annotation of the object. -func updateObjRoleChangedInfo[T generics.Object, PT generics.PObject[T]]( - ctx context.Context, cli client.Client, event *corev1.Event, obj T, role string) error { - pObj := PT(&obj) - patch := client.MergeFrom(PT(pObj.DeepCopy())) - pObj.GetLabels()[constant.RoleLabelKey] = role - if pObj.GetAnnotations() == nil { - pObj.SetAnnotations(map[string]string{}) - } - pObj.GetAnnotations()[constant.LastRoleSnapshotVersionAnnotationKey] = event.EventTime.Time.Format(time.RFC3339Nano) - if err := cli.Patch(ctx, pObj, patch); err != nil { - return err - } - return nil -} - -// HandleReplicationSetRoleChangeEvent handles the role change event of the replication workload when switchPolicy is Noop. -func HandleReplicationSetRoleChangeEvent(cli client.Client, - reqCtx intctrlutil.RequestCtx, - event *corev1.Event, - cluster *appsv1alpha1.Cluster, - compName string, - pod *corev1.Pod, - newRole string) error { - reqCtx.Log.Info("receive role change event", "podName", pod.Name, "current pod role label", pod.Labels[constant.RoleLabelKey], "new role", newRole) - // if newRole is not Primary or Secondary, ignore it. 
- if !slices.Contains([]string{constant.Primary, constant.Secondary}, newRole) { - reqCtx.Log.Info("replicationSet new role is invalid, please check", "new role", newRole) - return nil - } - - // if switchPolicy is not Noop, return - clusterCompSpec := getClusterComponentSpecByName(*cluster, compName) - if clusterCompSpec == nil || clusterCompSpec.SwitchPolicy == nil || clusterCompSpec.SwitchPolicy.Type != appsv1alpha1.Noop { - reqCtx.Log.Info("cluster switchPolicy is not Noop, does not support handling role change event", "cluster", cluster.Name) - return nil - } - - // update pod role label with newRole - if err := updateObjRoleChangedInfo(reqCtx.Ctx, cli, event, *pod, newRole); err != nil { - reqCtx.Log.Info("failed to update pod role label", "podName", pod.Name, "newRole", newRole, "err", err) - return err - } - reqCtx.Log.Info("succeed to update pod role label", "podName", pod.Name, "newRole", newRole) - return nil -} - -// composeReplicationRolePriorityMap generates a priority map based on roles. -func composeReplicationRolePriorityMap() map[string]int { - return map[string]int{ - "": emptyReplicationPriority, - constant.Primary: primaryPriority, - constant.Secondary: secondaryPriority, - } -} - -// generateReplicationParallelPlan generates a parallel plan for the replication workload. -// unknown & empty & secondary & primary -func generateReplicationParallelPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - for _, pod := range pods { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - } -} - -// generateReplicationSerialPlan generates a serial plan for the replication workload. 
-// unknown -> empty -> secondary -> primary -func generateReplicationSerialPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - for _, pod := range pods { - nextStep := &Step{} - nextStep.Obj = pod - start.NextSteps = append(start.NextSteps, nextStep) - start = nextStep - } -} - -// generateReplicationBestEffortParallelPlan generates a best effort parallel plan for the replication workload. -// unknown & empty & 1/2 secondaries -> 1/2 secondaries -> primary -func generateReplicationBestEffortParallelPlan(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int) { - start := plan.Start - l := len(pods) - unknownEmptySteps := make([]*Step, 0, l) - secondarySteps := make([]*Step, 0, l) - primarySteps := make([]*Step, 0, l) - - for _, pod := range pods { - role := pod.Labels[constant.RoleLabelKey] - nextStep := &Step{Obj: pod} - switch { - case rolePriorityMap[role] <= emptyReplicationPriority: - unknownEmptySteps = append(unknownEmptySteps, nextStep) - case rolePriorityMap[role] < primaryPriority: - secondarySteps = append(secondarySteps, nextStep) - default: - primarySteps = append(primarySteps, nextStep) - } - } - - // append unknown, empty - if len(unknownEmptySteps) > 0 { - start.NextSteps = append(start.NextSteps, unknownEmptySteps...) - start = start.NextSteps[0] - } - - // append 1/2 secondaries - end := len(secondarySteps) / 2 - if end > 0 { - start.NextSteps = append(start.NextSteps, secondarySteps[:end]...) - start = start.NextSteps[0] - } - - // append the other 1/2 secondaries - if len(secondarySteps) > end { - start.NextSteps = append(start.NextSteps, secondarySteps[end:]...) - start = start.NextSteps[0] - } - - // append primary - if len(primarySteps) > 0 { - start.NextSteps = append(start.NextSteps, primarySteps...) 
- } -} diff --git a/controllers/apps/components/replication_set_utils_test.go b/controllers/apps/components/replication_set_utils_test.go deleted file mode 100644 index d50e8edb53b..00000000000 --- a/controllers/apps/components/replication_set_utils_test.go +++ /dev/null @@ -1,278 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "context" - "fmt" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/sethvargo/go-password/password" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/builder" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" - lorryutil "github.com/apecloud/kubeblocks/lorry/util" -) - -var _ = Describe("replicationSet Util", func() { - - var ( - clusterName = "test-cluster-repl" - clusterDefName = "test-cluster-def-repl" - clusterVersionName = "test-cluster-version-repl" - ) - - var ( - clusterDefObj *appsv1alpha1.ClusterDefinition - clusterVersionObj *appsv1alpha1.ClusterVersion - clusterObj *appsv1alpha1.Cluster - ) - - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.StatefulSetSignature, true, inNS, ml) - testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - testHandleReplicationSet := func() { - - By("Creating a cluster with replication workloadType.") - clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). - AddComponent(testapps.DefaultRedisCompSpecName, testapps.DefaultRedisCompDefName). - SetReplicas(testapps.DefaultReplicationReplicas). - Create(&testCtx).GetObject() - - By("Creating a statefulSet of replication workloadType.") - container := corev1.Container{ - Name: "mock-redis-container", - Image: testapps.DefaultRedisImageName, - ImagePullPolicy: corev1.PullIfNotPresent, - } - sts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, - clusterObj.Name+"-"+testapps.DefaultRedisCompSpecName, clusterObj.Name, testapps.DefaultRedisCompSpecName). - AddFinalizers([]string{constant.DBClusterFinalizerName}). - AddContainer(container). - AddAppInstanceLabel(clusterObj.Name). - AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). - SetReplicas(2). - Create(&testCtx).GetObject() - - By("Creating Pods of replication workloadType.") - for i := int32(0); i < *sts.Spec.Replicas; i++ { - _ = testapps.NewPodFactory(testCtx.DefaultNamespace, fmt.Sprintf("%s-%d", sts.Name, i)). - AddContainer(container). - AddLabelsInMap(sts.Labels). - AddRoleLabel(DefaultRole(i)). 
- Create(&testCtx).GetObject() - } - } - - testNeedUpdateReplicationSetStatus := func() { - By("Creating a cluster with replication workloadType.") - clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). - AddComponent(testapps.DefaultRedisCompSpecName, testapps.DefaultRedisCompDefName).Create(&testCtx).GetObject() - - By("init replicationSet cluster status") - patch := client.MergeFrom(clusterObj.DeepCopy()) - clusterObj.Status.Phase = appsv1alpha1.RunningClusterPhase - clusterObj.Status.Components = map[string]appsv1alpha1.ClusterComponentStatus{ - testapps.DefaultRedisCompSpecName: { - Phase: appsv1alpha1.RunningClusterCompPhase, - ReplicationSetStatus: &appsv1alpha1.ReplicationSetStatus{ - Primary: appsv1alpha1.ReplicationMemberStatus{ - Pod: clusterObj.Name + testapps.DefaultRedisCompSpecName + "-0", - }, - Secondaries: []appsv1alpha1.ReplicationMemberStatus{ - { - Pod: clusterObj.Name + testapps.DefaultRedisCompSpecName + "-1", - }, - { - Pod: clusterObj.Name + testapps.DefaultRedisCompSpecName + "-2", - }, - }, - }, - }, - } - Expect(k8sClient.Status().Patch(context.Background(), clusterObj, patch)).Should(Succeed()) - - By("testing sync cluster status with add pod") - - var podList []corev1.Pod - sts := testk8s.NewFakeStatefulSet(clusterObj.Name+testapps.DefaultRedisCompSpecName, 4) - - for i := int32(0); i < *sts.Spec.Replicas; i++ { - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, fmt.Sprintf("%s-%d", sts.Name, i)). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). - AddRoleLabel(DefaultRole(i)). 
- Create(&testCtx).GetObject() - podList = append(podList, *pod) - } - err := genReplicationSetStatus(clusterObj.Status.Components[testapps.DefaultRedisCompSpecName].ReplicationSetStatus, podList) - Expect(err).ShouldNot(Succeed()) - Expect(err.Error()).Should(ContainSubstring("more than one primary pod found")) - - newReplicationStatus := &appsv1alpha1.ReplicationSetStatus{} - Expect(genReplicationSetStatus(newReplicationStatus, podList)).Should(Succeed()) - Expect(len(newReplicationStatus.Secondaries)).Should(Equal(3)) - } - - createRoleChangedEvent := func(podName, role string, podUid types.UID) *corev1.Event { - seq, _ := password.Generate(16, 16, 0, true, true) - objectRef := corev1.ObjectReference{ - APIVersion: "v1", - Kind: "Pod", - Namespace: testCtx.DefaultNamespace, - Name: podName, - UID: podUid, - } - eventName := strings.Join([]string{podName, seq}, ".") - return builder.NewEventBuilder(testCtx.DefaultNamespace, eventName). - SetInvolvedObject(objectRef). - SetMessage(fmt.Sprintf("{\"event\":\"roleChanged\",\"originalRole\":\"secondary\",\"role\":\"%s\"}", role)). - SetReason(string(lorryutil.CheckRoleOperation)). - SetType(corev1.EventTypeNormal). - SetFirstTimestamp(metav1.NewTime(time.Now())). - SetLastTimestamp(metav1.NewTime(time.Now())). - GetObject() - } - - testHandleReplicationSetRoleChangeEvent := func() { - By("Creating a cluster with replication workloadType.") - clusterSwitchPolicy := &appsv1alpha1.ClusterSwitchPolicy{ - Type: appsv1alpha1.Noop, - } - clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). - AddComponent(testapps.DefaultRedisCompSpecName, testapps.DefaultRedisCompDefName). - SetReplicas(testapps.DefaultReplicationReplicas). - SetSwitchPolicy(clusterSwitchPolicy). 
- Create(&testCtx).GetObject() - - By("Creating a statefulSet of replication workloadType.") - container := corev1.Container{ - Name: "mock-redis-container", - Image: testapps.DefaultRedisImageName, - ImagePullPolicy: corev1.PullIfNotPresent, - } - sts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, - clusterObj.Name+"-"+testapps.DefaultRedisCompSpecName, clusterObj.Name, testapps.DefaultRedisCompSpecName). - AddContainer(container). - AddAppInstanceLabel(clusterObj.Name). - AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). - SetReplicas(2). - Create(&testCtx).GetObject() - - By("Creating Pods of replication workloadType.") - var ( - primaryPod *corev1.Pod - secondaryPods []*corev1.Pod - ) - for i := int32(0); i < *sts.Spec.Replicas; i++ { - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, fmt.Sprintf("%s-%d", sts.Name, i)). - AddContainer(container). - AddLabelsInMap(sts.Labels). - AddRoleLabel(DefaultRole(i)). - Create(&testCtx).GetObject() - if pod.Labels[constant.RoleLabelKey] == constant.Primary { - primaryPod = pod - } else { - secondaryPods = append(secondaryPods, pod) - } - } - Expect(primaryPod).ShouldNot(BeNil()) - Expect(secondaryPods).ShouldNot(BeEmpty()) - - By("Test update replicationSet pod role label with event driver, secondary change to primary.") - reqCtx := intctrlutil.RequestCtx{ - Ctx: testCtx.Ctx, - Log: log.FromContext(ctx).WithValues("event", testCtx.DefaultNamespace), - } - event := createRoleChangedEvent(secondaryPods[0].Name, constant.Primary, secondaryPods[0].UID) - Expect(HandleReplicationSetRoleChangeEvent(k8sClient, reqCtx, event, clusterObj, testapps.DefaultRedisCompSpecName, - secondaryPods[0], constant.Primary)).ShouldNot(HaveOccurred()) - - By("Test when secondary change to primary, the old primary label has been updated at the same time, so return nil directly.") - event = createRoleChangedEvent(primaryPod.Name, constant.Secondary, primaryPod.UID) - 
Expect(HandleReplicationSetRoleChangeEvent(k8sClient, reqCtx, event, clusterObj, testapps.DefaultRedisCompSpecName, - primaryPod, constant.Secondary)).ShouldNot(HaveOccurred()) - } - - // Scenarios - - Context("test replicationSet util", func() { - BeforeEach(func() { - By("Create a clusterDefinition obj with replication workloadType.") - clusterDefObj = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.ReplicationRedisComponent, testapps.DefaultRedisCompDefName). - Create(&testCtx).GetObject() - - By("Create a clusterVersion obj with replication workloadType.") - clusterVersionObj = testapps.NewClusterVersionFactory(clusterVersionName, clusterDefObj.GetName()). - AddComponentVersion(testapps.DefaultRedisCompDefName).AddContainerShort(testapps.DefaultRedisContainerName, testapps.DefaultRedisImageName). - Create(&testCtx).GetObject() - - }) - - It("Test handReplicationSet with different conditions", func() { - testHandleReplicationSet() - }) - - It("Test need update replicationSet status when horizontal scaling adds pod or removes pod", func() { - testNeedUpdateReplicationSetStatus() - }) - - It("Test update pod role label by roleChangedEvent when ha switch", func() { - testHandleReplicationSetRoleChangeEvent() - }) - }) -}) diff --git a/controllers/apps/components/replication_workload.go b/controllers/apps/components/replication_workload.go deleted file mode 100644 index 073ab8e78d9..00000000000 --- a/controllers/apps/components/replication_workload.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/apecloud/kubeblocks/internal/constant" -) - -type replicationComponentWorkloadBuilder struct { - componentWorkloadBuilderBase -} - -var _ componentWorkloadBuilder = &replicationComponentWorkloadBuilder{} - -func (b *replicationComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { - return b.BuildWorkload4StatefulSet("replication") -} - -func (b *replicationComponentWorkloadBuilder) BuildService() componentWorkloadBuilder { - buildFn := func() ([]client.Object, error) { - svcList := factory.BuildSvcList(b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent()) - objs := make([]client.Object, 0, len(svcList)) - for _, svc := range svcList { - svc.Spec.Selector[constant.RoleLabelKey] = constant.Primary - objs = append(objs, svc) - } - return objs, nil - } - return b.BuildWrapper(buildFn) -} diff --git a/controllers/apps/components/rsm.go b/controllers/apps/components/rsm.go deleted file mode 100644 index fe8eb6ddf57..00000000000 --- a/controllers/apps/components/rsm.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -type rsmComponent struct { - rsmComponentBase -} - -var _ Component = &rsmComponent{} - -const workloadType = "RSM" - -func newRSMComponent(cli client.Client, - recorder record.EventRecorder, - cluster *appsv1alpha1.Cluster, - clusterVersion *appsv1alpha1.ClusterVersion, - synthesizedComponent *component.SynthesizedComponent, - dag *graph.DAG) *rsmComponent { - comp := &rsmComponent{ - rsmComponentBase: rsmComponentBase{ - componentBase: componentBase{ - Client: cli, - Recorder: recorder, - Cluster: cluster, - ClusterVersion: clusterVersion, - Component: synthesizedComponent, - ComponentSet: &RSM{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - ComponentSpec: nil, - ComponentDef: nil, - }, - }, - Dag: dag, - WorkloadVertex: nil, - }, - }, - } - return comp -} - -func (c *rsmComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, - action *ictrltypes.LifecycleAction) componentWorkloadBuilder { - builder := &rsmComponentWorkloadBuilder{ - componentWorkloadBuilderBase: componentWorkloadBuilderBase{ - ReqCtx: reqCtx, - Client: cli, - Comp: c, - DefaultAction: action, - Error: 
nil, - EnvConfig: nil, - Workload: nil, - }, - } - builder.ConcreteBuilder = builder - return builder -} - -func (c *rsmComponent) GetWorkloadType() appsv1alpha1.WorkloadType { - return workloadType -} - -func (c *rsmComponent) GetBuiltObjects(reqCtx intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) { - return c.rsmComponentBase.GetBuiltObjects(c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *rsmComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.rsmComponentBase.Create(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *rsmComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.rsmComponentBase.Update(reqCtx, cli, c.newBuilder(reqCtx, cli, nil)) -} - -func (c *rsmComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.rsmComponentBase.Status(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr())) -} diff --git a/controllers/apps/components/rsm_set.go b/controllers/apps/components/rsm_set.go deleted file mode 100644 index f7a86efba24..00000000000 --- a/controllers/apps/components/rsm_set.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "context" - "fmt" - "time" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubectl/pkg/util/podutils" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - rsmcore "github.com/apecloud/kubeblocks/internal/controller/rsm" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -type RSM struct { - componentSetBase -} - -var _ componentSet = &RSM{} - -func (r *RSM) getName() string { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Name - } - return r.ComponentSpec.Name -} - -func (r *RSM) getReplicas() int32 { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Replicas - } - return r.ComponentSpec.Replicas -} - -func (r *RSM) IsRunning(ctx context.Context, obj client.Object) (bool, error) { - if obj == nil { - return false, nil - } - rsm, ok := obj.(*workloads.ReplicatedStateMachine) - if !ok { - return false, nil - } - if isLatestRevision, err := IsComponentPodsWithLatestRevision(ctx, r.Cli, r.Cluster, rsm); err != nil { - return false, err - } else if !isLatestRevision { - return false, nil - } - - // whether rsm is ready - return rsmcore.IsRSMReady(rsm), nil -} - -func (r *RSM) PodsReady(ctx context.Context, obj client.Object) (bool, error) { - if obj == nil { - return false, nil - } - rsm, ok := obj.(*workloads.ReplicatedStateMachine) - if !ok { - return false, nil - } - sts := ConvertRSMToSTS(rsm) - return statefulSetPodsAreReady(sts, r.getReplicas()), nil -} - -func (r 
*RSM) PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool { - switch { - case pod == nil: - return false - case !podutils.IsPodAvailable(pod, minReadySeconds, metav1.Time{Time: time.Now()}): - return false - case r.SynthesizedComponent.WorkloadType == appsv1alpha1.Consensus, - r.SynthesizedComponent.WorkloadType == appsv1alpha1.Replication: - return intctrlutil.PodIsReadyWithLabel(*pod) - default: - return true - } -} - -func (r *RSM) GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap) { - if r.SynthesizedComponent.WorkloadType != appsv1alpha1.Consensus { - return "", nil - } - - var ( - isAbnormal bool - isFailed = true - statusMessages appsv1alpha1.ComponentMessageMap - ) - getProbes := func() *appsv1alpha1.ClusterDefinitionProbes { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Probes - } - return r.ComponentDef.Probes - } - getConsensusSpec := func() *appsv1alpha1.ConsensusSetSpec { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.ConsensusSpec - } - return r.ComponentDef.ConsensusSpec - } - compStatus, ok := r.Cluster.Status.Components[r.getName()] - if !ok || compStatus.PodsReadyTime == nil { - return "", nil - } - if !isProbeTimeout(getProbes(), compStatus.PodsReadyTime) { - return "", nil - } - for _, pod := range pods { - role := pod.Labels[constant.RoleLabelKey] - if role == getConsensusSpec().Leader.Name { - isFailed = false - } - if role == "" { - isAbnormal = true - statusMessages.SetObjectMessage(pod.Kind, pod.Name, "Role probe timeout, check whether the application is available") - } - // TODO clear up the message of ready pod in component.message. 
- } - switch { - case isFailed: - return appsv1alpha1.FailedClusterCompPhase, statusMessages - case isAbnormal: - return appsv1alpha1.AbnormalClusterCompPhase, statusMessages - default: - return "", statusMessages - } -} - -// GetPhaseWhenPodsNotReady gets the component phase when the pods of component are not ready. -func (r *RSM) GetPhaseWhenPodsNotReady(ctx context.Context, - componentName string, - originPhaseIsUpRunning bool) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - rsmList := &workloads.ReplicatedStateMachineList{} - podList, err := getCompRelatedObjectList(ctx, r.Cli, *r.Cluster, componentName, rsmList) - if err != nil || len(rsmList.Items) == 0 { - return "", nil, err - } - statusMessages := appsv1alpha1.ComponentMessageMap{} - // if the failed pod is not controlled by the latest revision - podIsControlledByLatestRevision := func(pod *corev1.Pod, rsm *workloads.ReplicatedStateMachine) bool { - return rsm.Status.ObservedGeneration == rsm.Generation && intctrlutil.GetPodRevision(pod) == rsm.Status.UpdateRevision - } - checkLeaderIsReady := func(pod *corev1.Pod, workload metav1.Object) bool { - getLeaderRoleName := func() string { - switch r.SynthesizedComponent.WorkloadType { - case appsv1alpha1.Consensus: - return r.SynthesizedComponent.ConsensusSpec.Leader.Name - case appsv1alpha1.Replication: - return constant.Primary - default: - return "" - } - } - leaderRoleName := getLeaderRoleName() - labelValue := pod.Labels[constant.RoleLabelKey] - return labelValue == leaderRoleName && intctrlutil.PodIsReady(pod) - } - checkExistFailedPodOfLatestRevision := func(pod *corev1.Pod, workload metav1.Object) bool { - rsm := workload.(*workloads.ReplicatedStateMachine) - // if component is up running but pod is not ready, this pod should be failed. - // for example: full disk cause readiness probe failed and serve is not available. - // but kubelet only sets the container is not ready and pod is also Running. 
- if originPhaseIsUpRunning { - return !intctrlutil.PodIsReady(pod) && podIsControlledByLatestRevision(pod, rsm) - } - isFailed, _, message := IsPodFailedAndTimedOut(pod) - existLatestRevisionFailedPod := isFailed && podIsControlledByLatestRevision(pod, rsm) - if existLatestRevisionFailedPod { - statusMessages.SetObjectMessage(pod.Kind, pod.Name, message) - } - return existLatestRevisionFailedPod - } - rsmObj := rsmList.Items[0] - return getComponentPhaseWhenPodsNotReady(podList, &rsmObj, r.getReplicas(), - rsmObj.Status.AvailableReplicas, checkLeaderIsReady, checkExistFailedPodOfLatestRevision), statusMessages, nil -} - -func (r *RSM) HandleRestart(context.Context, client.Object) ([]graph.Vertex, error) { - return nil, nil -} - -func (r *RSM) HandleRoleChange(ctx context.Context, obj client.Object) ([]graph.Vertex, error) { - if r.SynthesizedComponent.WorkloadType != appsv1alpha1.Consensus && - r.SynthesizedComponent.WorkloadType != appsv1alpha1.Replication { - return nil, nil - } - - // update cluster.status.component.consensusSetStatus based on the existences for all pods - componentName := r.getName() - rsmObj, _ := obj.(*workloads.ReplicatedStateMachine) - switch r.SynthesizedComponent.WorkloadType { - case appsv1alpha1.Consensus: - // first, get the old status - var oldConsensusSetStatus *appsv1alpha1.ConsensusSetStatus - if v, ok := r.Cluster.Status.Components[componentName]; ok { - oldConsensusSetStatus = v.ConsensusSetStatus - } - // create the initial status - newConsensusSetStatus := &appsv1alpha1.ConsensusSetStatus{ - Leader: appsv1alpha1.ConsensusMemberStatus{ - Name: "", - Pod: constant.ComponentStatusDefaultPodName, - AccessMode: appsv1alpha1.None, - }, - } - // then, set the new status - setConsensusSetStatusRolesByRSM(newConsensusSetStatus, rsmObj) - // if status changed, do update - if !cmp.Equal(newConsensusSetStatus, oldConsensusSetStatus) { - if err := initClusterComponentStatusIfNeed(r.Cluster, componentName, appsv1alpha1.Consensus); err != 
nil { - return nil, err - } - componentStatus := r.Cluster.Status.Components[componentName] - componentStatus.ConsensusSetStatus = newConsensusSetStatus - r.Cluster.Status.SetComponentStatus(componentName, componentStatus) - - return nil, nil - } - case appsv1alpha1.Replication: - sts := ConvertRSMToSTS(rsmObj) - podList, err := getRunningPods(ctx, r.Cli, sts) - if err != nil { - return nil, err - } - if len(podList) == 0 { - return nil, nil - } - primaryPods := make([]string, 0) - emptyRolePods := make([]string, 0) - vertexes := make([]graph.Vertex, 0) - for _, pod := range podList { - role, ok := pod.Labels[constant.RoleLabelKey] - if !ok || role == "" { - emptyRolePods = append(emptyRolePods, pod.Name) - continue - } - if role == constant.Primary { - primaryPods = append(primaryPods, pod.Name) - } - } - - for i := range podList { - pod := &podList[i] - needUpdate := false - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - switch { - case len(emptyRolePods) == len(podList): - // if the workload is newly created, and the role label is not set, we set the pod with index=0 as the primary by default. 
- needUpdate = handlePrimaryNotExistPod(pod) - default: - if len(primaryPods) != 1 { - return nil, fmt.Errorf("the number of primary pod is not equal to 1, primary pods: %v, emptyRole pods: %v", primaryPods, emptyRolePods) - } - needUpdate = handlePrimaryExistPod(pod, primaryPods[0]) - } - if needUpdate { - vertexes = append(vertexes, &ictrltypes.LifecycleVertex{ - Obj: pod, - Action: ictrltypes.ActionPatchPtr(), - }) - } - } - // rebuild cluster.status.components.replicationSet.status - if err := rebuildReplicationSetClusterStatus(r.Cluster, appsv1alpha1.Replication, componentName, podList); err != nil { - return nil, err - } - return vertexes, nil - } - - return nil, nil -} - -func setConsensusSetStatusRolesByRSM(newConsensusSetStatus *appsv1alpha1.ConsensusSetStatus, rsmObj *workloads.ReplicatedStateMachine) { - for _, memberStatus := range rsmObj.Status.MembersStatus { - status := appsv1alpha1.ConsensusMemberStatus{ - Name: memberStatus.Name, - Pod: memberStatus.PodName, - AccessMode: appsv1alpha1.AccessMode(memberStatus.AccessMode), - } - switch { - case memberStatus.IsLeader: - newConsensusSetStatus.Leader = status - case memberStatus.CanVote: - newConsensusSetStatus.Followers = append(newConsensusSetStatus.Followers, status) - default: - newConsensusSetStatus.Learner = &status - } - } -} - -func newRSM(ctx context.Context, - cli client.Client, - cluster *appsv1alpha1.Cluster, - clusterDef *appsv1alpha1.ClusterDefinition, - spec *appsv1alpha1.ClusterComponentSpec, - def appsv1alpha1.ClusterComponentDefinition) *RSM { - reqCtx := intctrlutil.RequestCtx{Log: log.FromContext(ctx).WithValues("rsm-test", def.Name)} - synthesizedComponent, _ := component.BuildComponent(reqCtx, nil, cluster, clusterDef, &def, spec, nil) - return &RSM{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - }, - } -} diff --git a/controllers/apps/components/rsm_set_test.go b/controllers/apps/components/rsm_set_test.go 
deleted file mode 100644 index 8f1d262caed..00000000000 --- a/controllers/apps/components/rsm_set_test.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "strconv" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("RSM Component", func() { - var ( - randomStr = testCtx.GetRandomStr() - clusterDefName = "mysql1-clusterdef-" + randomStr - clusterVersionName = "mysql1-clusterversion-" + randomStr - clusterName = "mysql1-" + randomStr - ) - const ( - defaultMinReadySeconds = 10 - rsmCompDefRef = "stateful" - rsmCompName = "stateful" - ) - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later 
it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. - By("clean resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - Context("RSM Component test", func() { - It("RSM Component test", func() { - By(" init cluster, statefulSet, pods") - clusterDef, _, cluster := testapps.InitConsensusMysql(&testCtx, clusterDefName, - clusterVersionName, clusterName, rsmCompDefRef, rsmCompName) - rsm := testapps.MockRSMComponent(&testCtx, clusterName, rsmCompName) - Expect(testapps.ChangeObj(&testCtx, rsm, func(machine *workloads.ReplicatedStateMachine) { - annotations := machine.Annotations - if annotations == nil { - annotations = make(map[string]string, 0) - } - annotations[constant.KubeBlocksGenerationKey] = strconv.FormatInt(cluster.Generation, 10) - machine.Annotations = annotations - })).Should(Succeed()) - Expect(testapps.ChangeObjStatus(&testCtx, cluster, func() { - cluster.Status.ObservedGeneration = cluster.Generation - })).Should(Succeed()) - rsmList := &workloads.ReplicatedStateMachineList{} - Eventually(func() bool { - _ = k8sClient.List(ctx, rsmList, client.InNamespace(testCtx.DefaultNamespace), client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: rsmCompName, - }, client.Limit(1)) - return len(rsmList.Items) > 0 - }).Should(BeTrue()) - _ = testapps.MockConsensusComponentStatefulSet(&testCtx, clusterName, rsmCompName) 
- stsList := &appsv1.StatefulSetList{} - Eventually(func() bool { - _ = k8sClient.List(ctx, stsList, client.InNamespace(testCtx.DefaultNamespace), client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: rsmCompName, - }, client.Limit(1)) - return len(stsList.Items) > 0 - }).Should(BeTrue()) - - By("test pods number of sts is 0") - rsm = &rsmList.Items[0] - clusterComponent := cluster.Spec.GetComponentByName(rsmCompName) - componentDef := clusterDef.GetComponentDefByName(clusterComponent.ComponentDefRef) - rsmComponent := newRSM(testCtx.Ctx, k8sClient, cluster, clusterDef, clusterComponent, *componentDef) - phase, _, _ := rsmComponent.GetPhaseWhenPodsNotReady(ctx, rsmCompName, false) - Expect(phase == appsv1alpha1.FailedClusterCompPhase).Should(BeTrue()) - - By("test pods are not ready") - updateRevision := fmt.Sprintf("%s-%s-%s", clusterName, rsmCompName, "6fdd48d9cd") - sts := &stsList.Items[0] - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - availableReplicas := *sts.Spec.Replicas - 1 - sts.Status.AvailableReplicas = availableReplicas - sts.Status.ReadyReplicas = availableReplicas - sts.Status.Replicas = availableReplicas - sts.Status.ObservedGeneration = 1 - sts.Status.UpdateRevision = updateRevision - })).Should(Succeed()) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - availableReplicas := *rsm.Spec.Replicas - 1 - rsm.Status.InitReplicas = *rsm.Spec.Replicas - rsm.Status.AvailableReplicas = availableReplicas - rsm.Status.ReadyReplicas = availableReplicas - rsm.Status.Replicas = availableReplicas - rsm.Status.ObservedGeneration = 1 - rsm.Status.CurrentGeneration = 1 - rsm.Status.UpdateRevision = updateRevision - })).Should(Succeed()) - podsReady, _ := rsmComponent.PodsReady(ctx, rsm) - Expect(podsReady).Should(BeFalse()) - - By("create pods of sts") - podList := testapps.MockConsensusComponentPods(&testCtx, sts, clusterName, rsmCompName) - - By("test rsm component is abnormal") - pod := 
podList[0] - // mock pod is not ready - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{} - })).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(rsm), func(g Gomega, tmpRSM *workloads.ReplicatedStateMachine) { - g.Expect(tmpRSM.Status.AvailableReplicas == *rsm.Spec.Replicas-1).Should(BeTrue()) - })).Should(Succeed()) - - By("should return empty string if pod of component is only not ready when component is not up running") - phase, _, _ = rsmComponent.GetPhaseWhenPodsNotReady(ctx, rsmCompName, false) - Expect(string(phase)).Should(Equal("")) - - By("expect component phase is Failed when pod of component is not ready and component is up running") - phase, _, _ = rsmComponent.GetPhaseWhenPodsNotReady(ctx, rsmCompName, true) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("expect component phase is Failed when pod of component is failed") - testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, pod.Name, pod.Namespace) - phase, _, _ = rsmComponent.GetPhaseWhenPodsNotReady(ctx, rsmCompName, false) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - By("not ready pod is not controlled by latest revision, should return empty string") - // mock pod is not controlled by latest revision - Expect(testapps.ChangeObj(&testCtx, pod, func(lpod *corev1.Pod) { - lpod.Labels[appsv1.ControllerRevisionHashLabelKey] = fmt.Sprintf("%s-%s-%s", clusterName, rsmCompName, "5wdsd8d9fs") - })).Should(Succeed()) - phase, _, _ = rsmComponent.GetPhaseWhenPodsNotReady(ctx, rsmCompName, false) - Expect(string(phase)).Should(Equal("")) - // reset updateRevision - Expect(testapps.ChangeObj(&testCtx, pod, func(lpod *corev1.Pod) { - lpod.Labels[appsv1.ControllerRevisionHashLabelKey] = updateRevision - })).Should(Succeed()) - - By("test pod is available") - lastTransTime := metav1.NewTime(time.Now().Add(-1 * (defaultMinReadySeconds + 1) * time.Second)) - 
testk8s.MockPodAvailable(pod, lastTransTime) - Expect(rsmComponent.PodIsAvailable(pod, defaultMinReadySeconds)).Should(BeTrue()) - - By("test pods are ready") - // mock sts is ready - testk8s.MockStatefulSetReady(sts) - testk8s.MockRSMReady(rsm, podList...) - Eventually(func() bool { - podsReady, _ = rsmComponent.PodsReady(ctx, rsm) - return podsReady - }).Should(BeTrue()) - - By("test component.replicas is inconsistent with rsm.spec.replicas") - oldReplicas := rsmComponent.SynthesizedComponent.Replicas - replicas := int32(4) - rsmComponent.SynthesizedComponent.Replicas = replicas - rsm.Annotations[constant.KubeBlocksGenerationKey] = "new-generation" - isRunning, _ := rsmComponent.IsRunning(ctx, rsm) - Expect(isRunning).Should(BeFalse()) - // reset replicas - rsmComponent.SynthesizedComponent.Replicas = oldReplicas - rsm.Annotations[constant.KubeBlocksGenerationKey] = strconv.FormatInt(cluster.Generation, 10) - - By("test component is running") - isRunning, _ = rsmComponent.IsRunning(ctx, rsm) - Expect(isRunning).Should(BeTrue()) - }) - }) - -}) diff --git a/controllers/apps/components/rsm_workload.go b/controllers/apps/components/rsm_workload.go deleted file mode 100644 index 72039d9fcb6..00000000000 --- a/controllers/apps/components/rsm_workload.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. 
If not, see . -*/ - -package components - -import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type rsmComponentWorkloadBuilder struct { - componentWorkloadBuilderBase -} - -var _ componentWorkloadBuilder = &rsmComponentWorkloadBuilder{} - -func (b *rsmComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { - buildfn := func() ([]client.Object, error) { - component := b.Comp.GetSynthesizedComponent() - obj, err := factory.BuildRSM(b.ReqCtx, b.Comp.GetCluster(), component, b.EnvConfig.Name) - if err != nil { - return nil, err - } - - b.Workload = obj - - return nil, nil // don't return sts here - } - return b.BuildWrapper(buildfn) -} diff --git a/controllers/apps/components/stateful.go b/controllers/apps/components/stateful.go deleted file mode 100644 index cb4d94cf5bb..00000000000 --- a/controllers/apps/components/stateful.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -func newStatefulComponent(cli client.Client, - recorder record.EventRecorder, - cluster *appsv1alpha1.Cluster, - clusterVersion *appsv1alpha1.ClusterVersion, - synthesizedComponent *component.SynthesizedComponent, - dag *graph.DAG) *statefulComponent { - comp := &statefulComponent{ - statefulComponentBase: statefulComponentBase{ - componentBase: componentBase{ - Client: cli, - Recorder: recorder, - Cluster: cluster, - ClusterVersion: clusterVersion, - Component: synthesizedComponent, - ComponentSet: &stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - ComponentSpec: nil, - ComponentDef: nil, - }, - }, - Dag: dag, - WorkloadVertex: nil, - }, - }, - } - return comp -} - -type statefulComponent struct { - statefulComponentBase -} - -var _ Component = &statefulComponent{} - -func (c *statefulComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, - action *ictrltypes.LifecycleAction) componentWorkloadBuilder { - builder := &statefulComponentWorkloadBuilder{ - componentWorkloadBuilderBase: componentWorkloadBuilderBase{ - ReqCtx: reqCtx, - Client: cli, - Comp: c, - DefaultAction: action, - Error: nil, - EnvConfig: nil, - Workload: nil, - }, - } - builder.ConcreteBuilder = builder - return builder -} - -func (c *statefulComponent) GetWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Stateful -} - -func (c *statefulComponent) GetBuiltObjects(reqCtx intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) 
{ - return c.statefulComponentBase.GetBuiltObjects(c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *statefulComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Create(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr())) -} - -func (c *statefulComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Update(reqCtx, cli, c.newBuilder(reqCtx, cli, nil)) -} - -func (c *statefulComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return c.statefulComponentBase.Status(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr())) -} diff --git a/controllers/apps/components/stateful_set.go b/controllers/apps/components/stateful_set.go deleted file mode 100644 index 38420fe6304..00000000000 --- a/controllers/apps/components/stateful_set.go +++ /dev/null @@ -1,274 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "context" - "errors" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubectl/pkg/util/podutils" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -type stateful struct { - componentSetBase -} - -var _ componentSet = &stateful{} - -func (r *stateful) getReplicas() int32 { - if r.SynthesizedComponent != nil { - return r.SynthesizedComponent.Replicas - } - return r.ComponentSpec.Replicas -} - -func (r *stateful) IsRunning(ctx context.Context, obj client.Object) (bool, error) { - if obj == nil { - return false, nil - } - sts := convertToStatefulSet(obj) - isRevisionConsistent, err := isStsAndPodsRevisionConsistent(ctx, r.Cli, sts) - if err != nil { - return false, err - } - targetReplicas := r.getReplicas() - return statefulSetOfComponentIsReady(sts, isRevisionConsistent, &targetReplicas), nil -} - -func (r *stateful) PodsReady(ctx context.Context, obj client.Object) (bool, error) { - if obj == nil { - return false, nil - } - sts := convertToStatefulSet(obj) - return statefulSetPodsAreReady(sts, r.getReplicas()), nil -} - -func (r *stateful) PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool { - if pod == nil { - return false - } - return podutils.IsPodAvailable(pod, minReadySeconds, metav1.Time{Time: time.Now()}) -} - -func (r *stateful) GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap) { - return "", nil -} - -// GetPhaseWhenPodsNotReady gets the component phase when the pods of component are not ready. 
-func (r *stateful) GetPhaseWhenPodsNotReady(ctx context.Context, - componentName string, - originPhaseIsUpRunning bool) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - stsList := &appsv1.StatefulSetList{} - podList, err := getCompRelatedObjectList(ctx, r.Cli, *r.Cluster, componentName, stsList) - if err != nil || len(stsList.Items) == 0 { - return "", nil, err - } - statusMessages := appsv1alpha1.ComponentMessageMap{} - // if the failed pod is not controlled by the latest revision - checkExistFailedPodOfLatestRevision := func(pod *corev1.Pod, workload metav1.Object) bool { - sts := workload.(*appsv1.StatefulSet) - // if component is up running but pod is not ready, this pod should be failed. - // for example: full disk cause readiness probe failed and serve is not available. - // but kubelet only sets the container is not ready and pod is also Running. - if originPhaseIsUpRunning { - return !intctrlutil.PodIsReady(pod) && intctrlutil.PodIsControlledByLatestRevision(pod, sts) - } - isFailed, _, message := IsPodFailedAndTimedOut(pod) - existLatestRevisionFailedPod := isFailed && intctrlutil.PodIsControlledByLatestRevision(pod, sts) - if existLatestRevisionFailedPod { - statusMessages.SetObjectMessage(pod.Kind, pod.Name, message) - } - return existLatestRevisionFailedPod - } - stsObj := stsList.Items[0] - return getComponentPhaseWhenPodsNotReady(podList, &stsObj, r.getReplicas(), - stsObj.Status.AvailableReplicas, nil, checkExistFailedPodOfLatestRevision), statusMessages, nil -} - -func (r *stateful) HandleRestart(context.Context, client.Object) ([]graph.Vertex, error) { - return nil, nil -} - -func (r *stateful) HandleRoleChange(context.Context, client.Object) ([]graph.Vertex, error) { - return nil, nil -} - -// HandleUpdateWithStrategy handles the update of component with strategy. -// REVIEW/TODO: (nashtsai) -// 1. 
too many args -func (r *stateful) HandleUpdateWithStrategy(ctx context.Context, obj client.Object, - compStatusProcessor func(compDef *appsv1alpha1.ClusterComponentDefinition, pods []corev1.Pod, componentName string) error, - priorityMapper func(component *appsv1alpha1.ClusterComponentDefinition) map[string]int, - serialStrategyHandler, bestEffortParallelStrategyHandler, parallelStrategyHandler func(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int)) ([]graph.Vertex, error) { - if r == nil { - return nil, nil - } - - stsObj := convertToStatefulSet(obj) - // get compDefName from stsObj.name - compDefName := r.Cluster.Spec.GetComponentDefRefName(stsObj.Labels[constant.KBAppComponentLabelKey]) - - // get componentDef from ClusterDefinition by compDefName - componentDef, err := appsv1alpha1.GetComponentDefByCluster(ctx, r.Cli, *r.Cluster, compDefName) - if err != nil { - return nil, err - } - - if componentDef == nil || componentDef.IsStatelessWorkload() { - return nil, nil - } - pods, err := GetPodListByStatefulSet(ctx, r.Cli, stsObj) - if err != nil { - return nil, err - } - - // update cluster.status.component.consensusSetStatus when all pods currently exist - if compStatusProcessor != nil { - componentName := stsObj.Labels[constant.KBAppComponentLabelKey] - if err = compStatusProcessor(componentDef, pods, componentName); err != nil { - return nil, err - } - } - - // prepare to do pods Deletion, that's the only thing we should do, - // the statefulset reconciler will do the rest. 
- // to simplify the process, we do pods Deletion after statefulset reconciliation done, - // that is stsObj.Generation == stsObj.Status.ObservedGeneration - if stsObj.Generation != stsObj.Status.ObservedGeneration { - return nil, nil - } - - // then we wait for all pods' presence, that is len(pods) == stsObj.Spec.Replicas - // at that point, we have enough info about the previous pods before delete the current one - if len(pods) != int(*stsObj.Spec.Replicas) { - return nil, nil - } - - // we don't check whether pod role label is present: prefer stateful set's Update done than role probing ready - - // generate the pods Deletion plan - podsToDelete := make([]*corev1.Pod, 0) - plan := generateUpdatePlan(stsObj, pods, componentDef, priorityMapper, - serialStrategyHandler, bestEffortParallelStrategyHandler, parallelStrategyHandler, &podsToDelete) - // execute plan - if _, err := plan.WalkOneStep(); err != nil { - return nil, err - } - - vertexes := make([]graph.Vertex, 0) - for _, pod := range podsToDelete { - vertexes = append(vertexes, &ictrltypes.LifecycleVertex{ - Obj: pod, - Action: ictrltypes.ActionDeletePtr(), - Orphan: true, - }) - } - return vertexes, nil -} - -func newStateful(cli client.Client, - cluster *appsv1alpha1.Cluster, - spec *appsv1alpha1.ClusterComponentSpec, - def appsv1alpha1.ClusterComponentDefinition) *stateful { - return &stateful{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: nil, - ComponentSpec: spec, - ComponentDef: &def, - }, - } -} - -// generateConsensusUpdatePlan generates Update plan based on UpdateStrategy -func generateUpdatePlan(stsObj *appsv1.StatefulSet, pods []corev1.Pod, - componentDef *appsv1alpha1.ClusterComponentDefinition, - priorityMapper func(component *appsv1alpha1.ClusterComponentDefinition) map[string]int, - serialStrategyHandler, bestEffortParallelStrategyHandler, parallelStrategyHandler func(plan *Plan, pods []corev1.Pod, rolePriorityMap map[string]int), - podsToDelete 
*[]*corev1.Pod) *Plan { - stsWorkload := componentDef.GetStatefulSetWorkload() - _, s := stsWorkload.FinalStsUpdateStrategy() - switch s.Type { - case appsv1.RollingUpdateStatefulSetStrategyType, "": - return nil - } - - plan := &Plan{} - plan.Start = &Step{} - plan.WalkFunc = func(obj interface{}) (bool, error) { - pod, ok := obj.(corev1.Pod) - if !ok { - return false, errors.New("wrong type: obj not Pod") - } - - // if DeletionTimestamp is not nil, it is terminating. - if pod.DeletionTimestamp != nil { - return true, nil - } - - // if pod is the latest version, we do nothing - if intctrlutil.GetPodRevision(&pod) == stsObj.Status.UpdateRevision { - // wait until ready - return !intctrlutil.PodIsReadyWithLabel(pod), nil - } - - // delete the pod to trigger associate StatefulSet to re-create it - *podsToDelete = append(*podsToDelete, &pod) - - return true, nil - } - - var rolePriorityMap map[string]int - if priorityMapper != nil { - rolePriorityMap = priorityMapper(componentDef) - SortPods(pods, rolePriorityMap, constant.RoleLabelKey) - } - - // generate plan by UpdateStrategy - switch stsWorkload.GetUpdateStrategy() { - case appsv1alpha1.ParallelStrategy: - if parallelStrategyHandler != nil { - parallelStrategyHandler(plan, pods, rolePriorityMap) - } - case appsv1alpha1.BestEffortParallelStrategy: - if bestEffortParallelStrategyHandler != nil { - bestEffortParallelStrategyHandler(plan, pods, rolePriorityMap) - } - case appsv1alpha1.SerialStrategy: - fallthrough - default: - if serialStrategyHandler != nil { - serialStrategyHandler(plan, pods, rolePriorityMap) - } - } - return plan -} diff --git a/controllers/apps/components/stateful_set_test.go b/controllers/apps/components/stateful_set_test.go deleted file mode 100644 index 09cba54c5a3..00000000000 --- a/controllers/apps/components/stateful_set_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can 
redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("Stateful Component", func() { - var ( - randomStr = testCtx.GetRandomStr() - clusterDefName = "mysql1-clusterdef-" + randomStr - clusterVersionName = "mysql1-clusterversion-" + randomStr - clusterName = "mysql1-" + randomStr - ) - const ( - defaultMinReadySeconds = 10 - statefulCompDefRef = "stateful" - statefulCompName = "stateful" - ) - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - Context("Stateful Component test", func() { - It("Stateful Component test", func() { - By(" init cluster, statefulSet, pods") - clusterDef, _, cluster := testapps.InitConsensusMysql(&testCtx, clusterDefName, - clusterVersionName, clusterName, statefulCompDefRef, statefulCompName) - _ = testapps.MockConsensusComponentStatefulSet(&testCtx, clusterName, statefulCompName) - stsList := &appsv1.StatefulSetList{} - Eventually(func() bool { - _ = k8sClient.List(ctx, stsList, client.InNamespace(testCtx.DefaultNamespace), client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: statefulCompName, - }, client.Limit(1)) - return len(stsList.Items) > 0 - }).Should(BeTrue()) - - By("test pods number of sts is 0") - sts := &stsList.Items[0] - clusterComponent := cluster.Spec.GetComponentByName(statefulCompName) - componentDef := clusterDef.GetComponentDefByName(clusterComponent.ComponentDefRef) - stateful := newStateful(k8sClient, cluster, clusterComponent, *componentDef) - phase, _, _ := stateful.GetPhaseWhenPodsNotReady(ctx, statefulCompName, false) - Expect(phase == appsv1alpha1.FailedClusterCompPhase).Should(BeTrue()) - - By("test pods are not ready") - updateRevision := fmt.Sprintf("%s-%s-%s", clusterName, statefulCompName, "6fdd48d9cd") - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - availableReplicas := *sts.Spec.Replicas - 1 - sts.Status.AvailableReplicas = availableReplicas - 
sts.Status.ReadyReplicas = availableReplicas - sts.Status.Replicas = availableReplicas - sts.Status.ObservedGeneration = 1 - sts.Status.UpdateRevision = updateRevision - })).Should(Succeed()) - podsReady, _ := stateful.PodsReady(ctx, sts) - Expect(podsReady).Should(BeFalse()) - - By("create pods of sts") - podList := testapps.MockConsensusComponentPods(&testCtx, sts, clusterName, statefulCompName) - - By("test stateful component is abnormal") - pod := podList[0] - // mock pod is not ready - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{} - })).Should(Succeed()) - // mock pod scheduled failure - // testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, pod.Name, pod.Namespace) - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(sts), func(g Gomega, tmpSts *appsv1.StatefulSet) { - g.Expect(tmpSts.Status.AvailableReplicas == *sts.Spec.Replicas-1).Should(BeTrue()) - })).Should(Succeed()) - - By("should return empty string if pod of component is only not ready when component is not up running") - phase, _, _ = stateful.GetPhaseWhenPodsNotReady(ctx, statefulCompName, false) - Expect(string(phase)).Should(Equal("")) - - By("expect component phase is Failed when pod of component is not ready and component is up running") - phase, _, _ = stateful.GetPhaseWhenPodsNotReady(ctx, statefulCompName, true) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - - By("expect component phase is Abnormal when pod of component is failed") - testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, pod.Name, pod.Namespace) - phase, _, _ = stateful.GetPhaseWhenPodsNotReady(ctx, statefulCompName, false) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - - By("not ready pod is not controlled by latest revision, should return empty string") - // mock pod is not controlled by latest revision - Expect(testapps.ChangeObj(&testCtx, pod, func(lpod *corev1.Pod) { - 
lpod.Labels[appsv1.ControllerRevisionHashLabelKey] = fmt.Sprintf("%s-%s-%s", clusterName, statefulCompName, "5wdsd8d9fs") - })).Should(Succeed()) - phase, _, _ = stateful.GetPhaseWhenPodsNotReady(ctx, statefulCompName, false) - Expect(string(phase)).Should(Equal("")) - // reset updateRevision - Expect(testapps.ChangeObj(&testCtx, pod, func(lpod *corev1.Pod) { - lpod.Labels[appsv1.ControllerRevisionHashLabelKey] = updateRevision - })).Should(Succeed()) - - By("test pod is available") - lastTransTime := metav1.NewTime(time.Now().Add(-1 * (defaultMinReadySeconds + 1) * time.Second)) - testk8s.MockPodAvailable(pod, lastTransTime) - Expect(stateful.PodIsAvailable(pod, defaultMinReadySeconds)).Should(BeTrue()) - - By("test pods are ready") - // mock sts is ready - testk8s.MockStatefulSetReady(sts) - podsReady, _ = stateful.PodsReady(ctx, sts) - Expect(podsReady).Should(BeTrue()) - - By("test component.replicas is inconsistent with sts.spec.replicas") - oldReplicas := clusterComponent.Replicas - replicas := int32(4) - clusterComponent.Replicas = replicas - isRunning, _ := stateful.IsRunning(ctx, sts) - Expect(isRunning).Should(BeFalse()) - // reset replicas - clusterComponent.Replicas = oldReplicas - - By("test component is running") - isRunning, _ = stateful.IsRunning(ctx, sts) - Expect(isRunning).Should(BeTrue()) - - // TODO(refactor): probe timed-out pod - // By("test handle probe timed out") - // requeue, _ := stateful.HandleProbeTimeoutWhenPodsReady(ctx, nil) - // Expect(requeue == false).Should(BeTrue()) - }) - }) - -}) diff --git a/controllers/apps/components/stateful_workload.go b/controllers/apps/components/stateful_workload.go deleted file mode 100644 index acf6819a453..00000000000 --- a/controllers/apps/components/stateful_workload.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero 
General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -type statefulComponentWorkloadBuilder struct { - componentWorkloadBuilderBase -} - -var _ componentWorkloadBuilder = &statefulComponentWorkloadBuilder{} - -func (b *statefulComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { - return b.BuildWorkload4StatefulSet("stateful") -} diff --git a/controllers/apps/components/stateless.go b/controllers/apps/components/stateless.go deleted file mode 100644 index 4b0492363ac..00000000000 --- a/controllers/apps/components/stateless.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "fmt" - "reflect" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" - "github.com/apecloud/kubeblocks/internal/controller/graph" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -func newStatelessComponent(cli client.Client, - recorder record.EventRecorder, - cluster *appsv1alpha1.Cluster, - clusterVersion *appsv1alpha1.ClusterVersion, - synthesizedComponent *component.SynthesizedComponent, - dag *graph.DAG) *statelessComponent { - comp := &statelessComponent{ - componentBase: componentBase{ - Client: cli, - Recorder: recorder, - Cluster: cluster, - ClusterVersion: clusterVersion, - Component: synthesizedComponent, - ComponentSet: &stateless{ - componentSetBase: componentSetBase{ - Cli: cli, - Cluster: cluster, - SynthesizedComponent: synthesizedComponent, - ComponentSpec: nil, - ComponentDef: nil, - }, - }, - Dag: dag, - WorkloadVertex: nil, - }, - } - return comp -} - -type statelessComponent struct { - componentBase - // runningWorkload can be nil, and the replicas of workload can be nil (zero) - runningWorkload *appsv1.Deployment -} - -var _ Component = &statelessComponent{} - -func (c *statelessComponent) newBuilder(reqCtx intctrlutil.RequestCtx, cli client.Client, - action *ictrltypes.LifecycleAction) componentWorkloadBuilder { - builder := &statelessComponentWorkloadBuilder{ - componentWorkloadBuilderBase: componentWorkloadBuilderBase{ - ReqCtx: reqCtx, - Client: cli, - Comp: c, - DefaultAction: action, - Error: nil, - EnvConfig: nil, - Workload: nil, - }, - } - builder.ConcreteBuilder = builder - return builder -} - -func (c *statelessComponent) init(reqCtx intctrlutil.RequestCtx, cli client.Client, 
builder componentWorkloadBuilder, load bool) error { - var err error - if builder != nil { - if err = builder.BuildEnv(). - BuildWorkload(). - BuildPDB(). - BuildHeadlessService(). - BuildConfig(). - BuildTLSVolume(). - BuildVolumeMount(). - BuildService(). - BuildTLSCert(). - Complete(); err != nil { - return err - } - } - if load { - c.runningWorkload, err = c.loadRunningWorkload(reqCtx, cli) - if err != nil { - return err - } - } - return nil -} - -func (c *statelessComponent) loadRunningWorkload(reqCtx intctrlutil.RequestCtx, cli client.Client) (*appsv1.Deployment, error) { - deployList, err := listDeployOwnedByComponent(reqCtx.Ctx, cli, c.GetNamespace(), c.GetMatchingLabels()) - if err != nil { - return nil, err - } - cnt := len(deployList) - if cnt == 1 { - return deployList[0], nil - } - if cnt == 0 { - return nil, nil - } else { - return nil, fmt.Errorf("more than one workloads found for the stateless component, cluster: %s, component: %s, cnt: %d", - c.GetClusterName(), c.GetName(), cnt) - } -} - -func (c *statelessComponent) GetWorkloadType() appsv1alpha1.WorkloadType { - return appsv1alpha1.Stateless -} - -func (c *statelessComponent) GetBuiltObjects(reqCtx intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) { - dag := c.Dag - defer func() { - c.Dag = dag - }() - - c.Dag = graph.NewDAG() - if err := c.init(intctrlutil.RequestCtx{}, nil, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr()), false); err != nil { - return nil, err - } - - objs := make([]client.Object, 0) - for _, v := range c.Dag.Vertices() { - if vv, ok := v.(*ictrltypes.LifecycleVertex); ok { - objs = append(objs, vv.Obj) - } - } - return objs, nil -} - -func (c *statelessComponent) Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - if err := c.init(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionCreatePtr()), false); err != nil { - return err - } - - if err := c.ValidateObjectsAction(); err != nil { - return err - } - - 
c.SetStatusPhase(appsv1alpha1.CreatingClusterCompPhase, nil, "Create a new component") - - return nil -} - -func (c *statelessComponent) Delete(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - // TODO(impl): delete component owned resources - return nil -} - -func (c *statelessComponent) Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - if err := c.init(reqCtx, cli, c.newBuilder(reqCtx, cli, nil), true); err != nil { - return err - } - - if c.runningWorkload != nil { - if err := c.Restart(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].volumeClaimTemplates[*].spec.resources.requests[corev1.ResourceStorage] - if err := c.ExpandVolume(reqCtx, cli); err != nil { - return err - } - - // cluster.spec.componentSpecs[*].replicas - if err := c.HorizontalScale(reqCtx, cli); err != nil { - return err - } - } - - if err := c.updateUnderlyingResources(reqCtx, cli, c.runningWorkload); err != nil { - return err - } - - return c.ResolveObjectsAction(reqCtx, cli) -} - -func (c *statelessComponent) Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - if err := c.init(reqCtx, cli, c.newBuilder(reqCtx, cli, ictrltypes.ActionNoopPtr()), true); err != nil { - return err - } - if c.runningWorkload == nil { - return nil - } - - // patch the current componentSpec workload's custom labels - if err := updateCustomLabelToPods(reqCtx.Ctx, cli, c.Cluster, c.Component, c.Dag); err != nil { - reqCtx.Event(c.Cluster, corev1.EventTypeWarning, "Component Workload Controller PatchWorkloadCustomLabelFailed", err.Error()) - return err - } - - return c.componentBase.StatusWorkload(reqCtx, cli, c.runningWorkload, nil) -} - -func (c *statelessComponent) ExpandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return nil -} - -func (c *statelessComponent) HorizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - if c.runningWorkload.Spec.Replicas == nil && c.Component.Replicas > 0 { - 
reqCtx.Recorder.Eventf(c.Cluster, - corev1.EventTypeNormal, - "HorizontalScale", - "start horizontal scale component %s of cluster %s from %d to %d", - c.GetName(), c.GetClusterName(), 0, c.Component.Replicas) - } else if c.runningWorkload.Spec.Replicas != nil && *c.runningWorkload.Spec.Replicas != c.Component.Replicas { - reqCtx.Recorder.Eventf(c.Cluster, - corev1.EventTypeNormal, - "HorizontalScale", - "start horizontal scale component %s of cluster %s from %d to %d", - c.GetName(), c.GetClusterName(), *c.runningWorkload.Spec.Replicas, c.Component.Replicas) - } - return nil -} - -func (c *statelessComponent) Restart(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return restartPod(&c.runningWorkload.Spec.Template) -} - -func (c *statelessComponent) Reconfigure(reqCtx intctrlutil.RequestCtx, cli client.Client) error { - return nil // TODO(impl) -} - -func (c *statelessComponent) updateUnderlyingResources(reqCtx intctrlutil.RequestCtx, cli client.Client, deployObj *appsv1.Deployment) error { - if deployObj == nil { - c.createWorkload() - } else { - c.updateWorkload(deployObj) - } - if err := c.UpdatePDB(reqCtx, cli); err != nil { - return err - } - if err := c.UpdateService(reqCtx, cli); err != nil { - return err - } - // update KB___ env needed by pod to obtain hostname. 
- c.updatePodEnvConfig() - return nil -} - -func (c *statelessComponent) createWorkload() { - deployProto := c.WorkloadVertex.Obj.(*appsv1.Deployment) - c.WorkloadVertex.Obj = deployProto - c.WorkloadVertex.Action = ictrltypes.ActionCreatePtr() - c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Component workload created") -} - -func (c *statelessComponent) updateWorkload(deployObj *appsv1.Deployment) { - deployObjCopy := deployObj.DeepCopy() - deployProto := c.WorkloadVertex.Obj.(*appsv1.Deployment) - - mergeAnnotations(deployObj.Spec.Template.Annotations, &deployProto.Spec.Template.Annotations) - buildWorkLoadAnnotations(deployObjCopy, c.Cluster) - deployObjCopy.Spec = deployProto.Spec - - resolvePodSpecDefaultFields(deployObj.Spec.Template.Spec, &deployObjCopy.Spec.Template.Spec) - - delayUpdatePodSpecSystemFields(deployObj.Spec.Template.Spec, &deployObjCopy.Spec.Template.Spec) - - if !reflect.DeepEqual(&deployObj.Spec, &deployObjCopy.Spec) { - updatePodSpecSystemFields(&deployObjCopy.Spec.Template.Spec) - c.WorkloadVertex.Obj = deployObjCopy - c.WorkloadVertex.Action = ictrltypes.ActionUpdatePtr() - c.SetStatusPhase(appsv1alpha1.UpdatingClusterCompPhase, nil, "Component workload updated") - } -} - -func (c *statelessComponent) updatePodEnvConfig() { - for _, v := range ictrltypes.FindAll[*corev1.ConfigMap](c.Dag) { - node := v.(*ictrltypes.LifecycleVertex) - // TODO: need a way to reference the env config. 
- envConfigName := fmt.Sprintf("%s-%s-env", c.GetClusterName(), c.GetName()) - if node.Obj.GetName() == envConfigName { - node.Action = ictrltypes.ActionUpdatePtr() - } - } -} diff --git a/controllers/apps/components/stateless_set.go b/controllers/apps/components/stateless_set.go deleted file mode 100644 index 3738625988b..00000000000 --- a/controllers/apps/components/stateless_set.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "context" - "math" - "strings" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - deploymentutil "k8s.io/kubectl/pkg/util/deployment" - "k8s.io/kubectl/pkg/util/podutils" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -// NewRSAvailableReason is added in a deployment when its newest replica set is made available -// ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds -// is at least the minimum available pods that need to run for the deployment. 
-const NewRSAvailableReason = "NewReplicaSetAvailable" - -type stateless struct { - componentSetBase -} - -var _ componentSet = &stateless{} - -func (stateless *stateless) getReplicas() int32 { - if stateless.SynthesizedComponent != nil { - return stateless.SynthesizedComponent.Replicas - } - return stateless.ComponentSpec.Replicas -} - -func (stateless *stateless) IsRunning(ctx context.Context, obj client.Object) (bool, error) { - if stateless == nil { - return false, nil - } - return stateless.PodsReady(ctx, obj) -} - -func (stateless *stateless) PodsReady(ctx context.Context, obj client.Object) (bool, error) { - if stateless == nil { - return false, nil - } - deploy, ok := obj.(*appsv1.Deployment) - if !ok { - return false, nil - } - targetReplicas := stateless.getReplicas() - return deploymentIsReady(deploy, &targetReplicas), nil -} - -func (stateless *stateless) PodIsAvailable(pod *corev1.Pod, minReadySeconds int32) bool { - if stateless == nil || pod == nil { - return false - } - return podutils.IsPodAvailable(pod, minReadySeconds, metav1.Time{Time: time.Now()}) -} - -func (stateless *stateless) GetPhaseWhenPodsReadyAndProbeTimeout(pods []*corev1.Pod) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap) { - return "", nil -} - -// GetPhaseWhenPodsNotReady gets the component phase when the pods of component are not ready. 
-func (stateless *stateless) GetPhaseWhenPodsNotReady(ctx context.Context, - componentName string, - originPhaseIsUpRunning bool) (appsv1alpha1.ClusterComponentPhase, appsv1alpha1.ComponentMessageMap, error) { - deployList := &appsv1.DeploymentList{} - podList, err := getCompRelatedObjectList(ctx, stateless.Cli, *stateless.Cluster, componentName, deployList) - if err != nil || len(deployList.Items) == 0 { - return "", nil, err - } - statusMessages := appsv1alpha1.ComponentMessageMap{} - // if the failed pod is not controlled by the new ReplicaSetKind - checkExistFailedPodOfNewRS := func(pod *corev1.Pod, workload metav1.Object) bool { - d := workload.(*appsv1.Deployment) - // if component is up running but pod is not ready, this pod should be failed. - // for example: full disk cause readiness probe failed and serve is not available. - // but kubelet only sets the container is not ready and pod is also Running. - if originPhaseIsUpRunning { - return !intctrlutil.PodIsReady(pod) && belongToNewReplicaSet(d, pod) - } - isFailed, _, message := IsPodFailedAndTimedOut(pod) - existLatestRevisionFailedPod := isFailed && belongToNewReplicaSet(d, pod) - if existLatestRevisionFailedPod { - statusMessages.SetObjectMessage(pod.Kind, pod.Name, message) - } - return existLatestRevisionFailedPod - } - deploy := &deployList.Items[0] - return getComponentPhaseWhenPodsNotReady(podList, deploy, stateless.getReplicas(), - deploy.Status.AvailableReplicas, nil, checkExistFailedPodOfNewRS), statusMessages, nil -} - -func (stateless *stateless) HandleRestart(context.Context, client.Object) ([]graph.Vertex, error) { - return nil, nil -} - -func (stateless *stateless) HandleRoleChange(context.Context, client.Object) ([]graph.Vertex, error) { - return nil, nil -} - -func newStateless(cli client.Client, - cluster *appsv1alpha1.Cluster, - spec *appsv1alpha1.ClusterComponentSpec, - def appsv1alpha1.ClusterComponentDefinition) *stateless { - return &stateless{ - componentSetBase: componentSetBase{ 
- Cli: cli, - Cluster: cluster, - SynthesizedComponent: nil, - ComponentSpec: spec, - ComponentDef: &def, - }, - } -} - -// deploymentIsReady checks deployment is ready -func deploymentIsReady(deploy *appsv1.Deployment, targetReplicas *int32) bool { - var ( - componentIsRunning = true - newRSAvailable = true - ) - if targetReplicas == nil { - targetReplicas = deploy.Spec.Replicas - } - - if hasProgressDeadline(deploy) { - // if the deployment.Spec.ProgressDeadlineSeconds exists, we should check if the new replicaSet is available. - // when deployment.Spec.ProgressDeadlineSeconds does not exist, the deployment controller will remove the - // DeploymentProgressing condition. - condition := deploymentutil.GetDeploymentCondition(deploy.Status, appsv1.DeploymentProgressing) - if condition == nil || condition.Reason != NewRSAvailableReason || condition.Status != corev1.ConditionTrue { - newRSAvailable = false - } - } - // check if the deployment of component is updated completely and ready. - if deploy.Status.AvailableReplicas != *targetReplicas || - deploy.Status.Replicas != *targetReplicas || - deploy.Status.ObservedGeneration != deploy.Generation || - deploy.Status.UpdatedReplicas != *targetReplicas || - !newRSAvailable { - componentIsRunning = false - } - return componentIsRunning -} - -// hasProgressDeadline checks if the Deployment d is expected to suffice the reason -// "ProgressDeadlineExceeded" when the Deployment progress takes longer than expected time. 
-func hasProgressDeadline(d *appsv1.Deployment) bool { - return d.Spec.ProgressDeadlineSeconds != nil && - *d.Spec.ProgressDeadlineSeconds > 0 && - *d.Spec.ProgressDeadlineSeconds != math.MaxInt32 -} - -// belongToNewReplicaSet checks if the pod belongs to the new replicaSet of deployment -func belongToNewReplicaSet(d *appsv1.Deployment, pod *corev1.Pod) bool { - if pod == nil || d == nil { - return false - } - condition := deploymentutil.GetDeploymentCondition(d.Status, appsv1.DeploymentProgressing) - if condition == nil { - return false - } - for _, v := range pod.OwnerReferences { - if v.Kind == constant.ReplicaSetKind && strings.Contains(condition.Message, v.Name) { - return d.Status.ObservedGeneration == d.Generation - } - } - return false -} diff --git a/controllers/apps/components/stateless_set_test.go b/controllers/apps/components/stateless_set_test.go deleted file mode 100644 index a4d142ed9b1..00000000000 --- a/controllers/apps/components/stateless_set_test.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("Stateful Component", func() { - var ( - randomStr = testCtx.GetRandomStr() - clusterDefName = "stateless-definition-" + randomStr - clusterVersionName = "stateless-cluster-version-" + randomStr - clusterName = "stateless-" + randomStr - ) - const ( - statelessCompName = "stateless" - statelessCompDefName = "stateless" - defaultMinReadySeconds = 10 - ) - - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. - By("clean resources") - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - testapps.ClearResources(&testCtx, intctrlutil.ClusterSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.DeploymentSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - Context("Stateless Component test", func() { - It("Stateless Component test", func() { - By(" init cluster, deployment") - clusterDef := testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.StatelessNginxComponent, statelessCompDefName). 
- Create(&testCtx).GetObject() - cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName, clusterVersionName). - AddComponent(statelessCompName, statelessCompDefName).SetReplicas(2).Create(&testCtx).GetObject() - deploy := testapps.MockStatelessComponentDeploy(&testCtx, clusterName, statelessCompName) - clusterComponent := cluster.Spec.GetComponentByName(statelessCompName) - componentDef := clusterDef.GetComponentDefByName(clusterComponent.ComponentDefRef) - statelessComponent := newStateless(k8sClient, cluster, clusterComponent, *componentDef) - By("test pods number of deploy is 0 ") - phase, _, _ := statelessComponent.GetPhaseWhenPodsNotReady(ctx, statelessCompName, false) - Expect(phase == appsv1alpha1.FailedClusterCompPhase).Should(BeTrue()) - - By("test pod is ready") - rsName := deploy.Name + "-5847cb795c" - pod := testapps.MockStatelessPod(&testCtx, deploy, clusterName, statelessCompName, rsName+randomStr) - lastTransTime := metav1.NewTime(time.Now().Add(-1 * (defaultMinReadySeconds + 1) * time.Second)) - testk8s.MockPodAvailable(pod, lastTransTime) - Expect(statelessComponent.PodIsAvailable(pod, defaultMinReadySeconds)).Should(BeTrue()) - - By("test a part pods of deploy are not ready") - // mock pod is not ready - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{} - })).Should(Succeed()) - // mock deployment is processing rs - Expect(testapps.ChangeObjStatus(&testCtx, deploy, func() { - deploy.Status.Conditions = []appsv1.DeploymentCondition{ - { - Type: appsv1.DeploymentProgressing, - Reason: "ProcessingRs", - Status: corev1.ConditionTrue, - Message: fmt.Sprintf(`ReplicaSet "%s" has progressing.`, rsName), - }, - } - deploy.Status.ObservedGeneration = 1 - })).Should(Succeed()) - Expect(testapps.ChangeObjStatus(&testCtx, deploy, func() { - availableReplicas := *deploy.Spec.Replicas - 1 - deploy.Status.AvailableReplicas = availableReplicas - 
deploy.Status.ReadyReplicas = availableReplicas - deploy.Status.Replicas = availableReplicas - })).Should(Succeed()) - podsReady, _ := statelessComponent.PodsReady(ctx, deploy) - Expect(podsReady).Should(BeFalse()) - By("should return empty string if pod of component is only not ready when component is not up running") - phase, _, _ = statelessComponent.GetPhaseWhenPodsNotReady(ctx, statelessCompName, false) - Expect(string(phase)).Should(Equal("")) - - By("expect component phase is Failed when pod of component is not ready and component is up running") - phase, _, _ = statelessComponent.GetPhaseWhenPodsNotReady(ctx, statelessCompName, true) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - - By("expect component phase is Abnormal when pod of component is failed") - testk8s.UpdatePodStatusScheduleFailed(ctx, testCtx, pod.Name, pod.Namespace) - phase, _, _ = statelessComponent.GetPhaseWhenPodsNotReady(ctx, statelessCompName, false) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - - By("test pods of deployment are ready") - testk8s.MockDeploymentReady(deploy, NewRSAvailableReason, rsName) - podsReady, _ = statelessComponent.PodsReady(ctx, deploy) - Expect(podsReady).Should(BeTrue()) - - By("test component.replicas is inconsistent with deployment.spec.replicas") - oldReplicas := clusterComponent.Replicas - replicas := int32(4) - clusterComponent.Replicas = replicas - isRunning, _ := statelessComponent.IsRunning(ctx, deploy) - Expect(isRunning).Should(BeFalse()) - // reset replicas - clusterComponent.Replicas = oldReplicas - - By("test component is running") - isRunning, _ = statelessComponent.IsRunning(ctx, deploy) - Expect(isRunning).Should(BeTrue()) - - // TODO(refactor): probe timed-out pod - // By("test handle probe timed out") - // requeue, _ := statelessComponent.HandleProbeTimeoutWhenPodsReady(ctx, nil) - // Expect(requeue == false).Should(BeTrue()) - - By("test pod is not failed and not controlled by new ReplicaSet of 
deployment") - Expect(testapps.ChangeObjStatus(&testCtx, deploy, func() { - deploy.Status.Conditions = []appsv1.DeploymentCondition{ - { - Type: appsv1.DeploymentProgressing, - Reason: "ProcessingRs", - Status: corev1.ConditionTrue, - Message: fmt.Sprintf(`ReplicaSet "%s" has progressing.`, deploy.Name+"-584f7csdb"), - }, - } - })).Should(Succeed()) - phase, _, _ = statelessComponent.GetPhaseWhenPodsNotReady(ctx, statelessCompName, false) - Expect(string(phase)).Should(Equal("")) - }) - }) - -}) diff --git a/controllers/apps/components/stateless_workload.go b/controllers/apps/components/stateless_workload.go deleted file mode 100644 index f091a9bc70f..00000000000 --- a/controllers/apps/components/stateless_workload.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type statelessComponentWorkloadBuilder struct { - componentWorkloadBuilderBase -} - -var _ componentWorkloadBuilder = &statelessComponentWorkloadBuilder{} - -func (b *statelessComponentWorkloadBuilder) BuildWorkload() componentWorkloadBuilder { - buildfn := func() ([]client.Object, error) { - deploy, err := factory.BuildDeploy(b.ReqCtx, b.Comp.GetCluster(), b.Comp.GetSynthesizedComponent(), b.EnvConfig.Name) - if err != nil { - return nil, err - } - b.Workload = deploy - return nil, nil // don't return deployment here - } - return b.BuildWrapper(buildfn) -} diff --git a/controllers/apps/components/status.go b/controllers/apps/components/status.go deleted file mode 100644 index 0eb6cc4ca94..00000000000 --- a/controllers/apps/components/status.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package components - -import ( - "sort" - - "golang.org/x/exp/maps" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" -) - -var componentPhasePriority = map[appsv1alpha1.ClusterComponentPhase]int{ - appsv1alpha1.FailedClusterCompPhase: 1, - appsv1alpha1.AbnormalClusterCompPhase: 2, - appsv1alpha1.UpdatingClusterCompPhase: 3, - appsv1alpha1.StoppedClusterCompPhase: 4, - appsv1alpha1.RunningClusterCompPhase: 5, - appsv1alpha1.CreatingClusterCompPhase: 6, -} - -type statusReconciliationTxn struct { - proposals map[appsv1alpha1.ClusterComponentPhase]func() -} - -func (t *statusReconciliationTxn) propose(phase appsv1alpha1.ClusterComponentPhase, mutator func()) { - if t.proposals == nil { - t.proposals = make(map[appsv1alpha1.ClusterComponentPhase]func()) - } - if _, ok := t.proposals[phase]; ok { - return // keep first - } - t.proposals[phase] = mutator -} - -func (t *statusReconciliationTxn) commit() error { - if len(t.proposals) == 0 { - return nil - } - phases := maps.Keys(t.proposals) - sort.Slice(phases, func(i, j int) bool { - return componentPhasePriority[phases[i]] < componentPhasePriority[phases[j]] - }) - t.proposals[phases[0]]() - return nil -} diff --git a/controllers/apps/components/status_test.go b/controllers/apps/components/status_test.go deleted file mode 100644 index 2a8ed5ae4cc..00000000000 --- a/controllers/apps/components/status_test.go +++ /dev/null @@ -1,520 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package components - -import ( - "fmt" - "strconv" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/graph" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" -) - -var _ = Describe("ComponentStatusSynchronizer", func() { - const ( - compName = "comp" - compDefName = "comp" - ) - - var ( - clusterDefName = "test-clusterdef" - clusterVersionName = "test-clusterversion" - clusterName = "test-cluster" - controllerRevision = fmt.Sprintf("%s-%s-%s", clusterName, compName, "6fdd48d9cd1") - - clusterDef *appsv1alpha1.ClusterDefinition - cluster *appsv1alpha1.Cluster - component Component - rsm *workloads.ReplicatedStateMachine - reqCtx *intctrlutil.RequestCtx - dag *graph.DAG - err error - ) - - cleanAll := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - - // non-namespaced resources - testapps.ClearResources(&testCtx, generics.ClusterDefinitionSignature, inNS, ml) - - // namespaced resources - testapps.ClearResources(&testCtx, generics.ClusterSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.DeploymentSignature, inNS, ml) - if intctrlutil.IsRSMEnabled() { - testapps.ClearResources(&testCtx, generics.RSMSignature, inNS, ml) - } - - testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - - AfterEach(cleanAll) - - Context("with stateless component", func() { - BeforeEach(func() { - clusterDef = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.StatelessNginxComponent, compDefName). - GetObject() - - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName, clusterVersionName). - AddComponent(compName, compDefName). - SetReplicas(1). 
- GetObject() - - reqCtx = &intctrlutil.RequestCtx{ - Ctx: ctx, - Log: log.FromContext(ctx).WithValues("cluster", clusterDef.Name), - } - dag = graph.NewDAG() - component, err = NewComponent(*reqCtx, testCtx.Cli, clusterDef, nil, cluster, compName, dag) - Expect(err).Should(Succeed()) - Expect(component).ShouldNot(BeNil()) - }) - - It("should not change component if no deployment or pod exists", func() { - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(BeEmpty()) - }) - - Context("and with mocked deployment & pod", func() { - var ( - deployment *appsv1.Deployment - pod *corev1.Pod - ) - - BeforeEach(func() { - deploymentName := clusterName + "-" + compName - deployment = testapps.NewDeploymentFactory(testCtx.DefaultNamespace, deploymentName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetMinReadySeconds(int32(10)). - SetReplicas(int32(1)). - AddContainer(corev1.Container{Name: testapps.DefaultNginxContainerName, Image: testapps.NginxImage}). - Create(&testCtx).GetObject() - if intctrlutil.IsRSMEnabled() { - rsm = testapps.NewRSMFactory(testCtx.DefaultNamespace, deploymentName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(1)). - AddContainer(corev1.Container{Name: testapps.DefaultNginxContainerName, Image: testapps.NginxImage}). - Create(&testCtx).GetObject() - sts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, deploymentName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(1)). - AddContainer(corev1.Container{Name: testapps.DefaultNginxContainerName, Image: testapps.NginxImage}). 
- Create(&testCtx).GetObject() - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - sts.Status.ObservedGeneration = sts.Generation - sts.Status.UpdateRevision = controllerRevision - })).Should(Succeed()) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - rsm.Status.InitReplicas = *rsm.Spec.Replicas - rsm.Status.Replicas = *rsm.Spec.Replicas - rsm.Status.UpdateRevision = controllerRevision - rsm.Status.ObservedGeneration = rsm.Generation - rsm.Status.CurrentGeneration = rsm.Generation - })).Should(Succeed()) - } - - podName := fmt.Sprintf("%s-%s-%s", clusterName, compName, testCtx.GetRandomStr()) - if intctrlutil.IsRSMEnabled() { - podName = rsm.Name + "-0" - } - pod = testapps.NewPodFactory(testCtx.DefaultNamespace, podName). - SetOwnerReferences("apps/v1", constant.DeploymentKind, deployment). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(compName). - AddAppManangedByLabel(). - AddControllerRevisionHashLabel(controllerRevision). - AddContainer(corev1.Container{Name: testapps.DefaultNginxContainerName, Image: testapps.NginxImage}). 
- Create(&testCtx).GetObject() - }) - - It("should set component status to failed if container is not ready and have error message", func() { - Expect(mockContainerError(pod)).Should(Succeed()) - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - }) - - It("should set component status to running if container is ready", func() { - Expect(testapps.ChangeObjStatus(&testCtx, deployment, func() { - testk8s.MockDeploymentReady(deployment, NewRSAvailableReason, deployment.Name) - })).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, pod) - })).Should(Succeed()) - } - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - }) - }) - }) - - Context("with statefulset component", func() { - BeforeEach(func() { - clusterDef = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.StatefulMySQLComponent, compDefName). - GetObject() - - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName, clusterVersionName). - AddComponent(compName, compDefName). - SetReplicas(int32(3)). 
- GetObject() - - reqCtx = &intctrlutil.RequestCtx{ - Ctx: ctx, - Log: log.FromContext(ctx).WithValues("cluster", clusterDef.Name), - } - dag = graph.NewDAG() - component, err = NewComponent(*reqCtx, testCtx.Cli, clusterDef, nil, cluster, compName, dag) - Expect(err).Should(Succeed()) - Expect(component).ShouldNot(BeNil()) - }) - - It("should not change component if no statefulset or pod exists", func() { - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(BeEmpty()) - }) - - Context("and with mocked statefulset & pod", func() { - var ( - statefulset *appsv1.StatefulSet - pods []*corev1.Pod - ) - - BeforeEach(func() { - stsName := clusterName + "-" + compName - statefulset = testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(3)). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). - Create(&testCtx).GetObject() - // init statefulset status - testk8s.InitStatefulSetStatus(testCtx, statefulset, controllerRevision) - for i := 0; i < 3; i++ { - podName := fmt.Sprintf("%s-%s-%d", clusterName, compName, i) - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, podName). - SetOwnerReferences("apps/v1", constant.StatefulSetKind, statefulset). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(compName). - AddAppManangedByLabel(). - AddControllerRevisionHashLabel(controllerRevision). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). 
- Create(&testCtx).GetObject() - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{{ - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }} - })).Should(Succeed()) - pods = append(pods, pod) - } - if intctrlutil.IsRSMEnabled() { - rsm = testapps.NewRSMFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(3)). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). - Create(&testCtx).GetObject() - // init rsm status - testk8s.InitRSMStatus(testCtx, rsm, controllerRevision) - } - }) - - It("should set component status to failed if container is not ready and have error message", func() { - Expect(mockContainerError(pods[0])).Should(Succeed()) - Expect(mockContainerError(pods[1])).Should(Succeed()) - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - }) - - It("should set component status to running if container is ready", func() { - Expect(testapps.ChangeObjStatus(&testCtx, statefulset, func() { - testk8s.MockStatefulSetReady(statefulset) - })).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, pods...) - })).Should(Succeed()) - } - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - }) - }) - }) - - Context("with consensusset component", func() { - BeforeEach(func() { - clusterDef = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.ConsensusMySQLComponent, compDefName). 
- Create(&testCtx).GetObject() - - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName, clusterVersionName). - AddComponent(compName, compDefName). - SetReplicas(int32(3)). - Create(&testCtx).GetObject() - - reqCtx = &intctrlutil.RequestCtx{ - Ctx: ctx, - Log: log.FromContext(ctx).WithValues("cluster", clusterDef.Name), - } - dag = graph.NewDAG() - component, err = NewComponent(*reqCtx, testCtx.Cli, clusterDef, nil, cluster, compName, dag) - Expect(err).Should(Succeed()) - Expect(component).ShouldNot(BeNil()) - }) - - It("should not change component if no statefulset or pod exists", func() { - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(BeEmpty()) - }) - - Context("and with mocked statefulset & pod", func() { - var ( - statefulset *appsv1.StatefulSet - pods []*corev1.Pod - ) - - BeforeEach(func() { - stsName := clusterName + "-" + compName - statefulset = testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(3)). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). - Create(&testCtx).GetObject() - testk8s.InitStatefulSetStatus(testCtx, statefulset, controllerRevision) - for i := 0; i < 3; i++ { - podName := fmt.Sprintf("%s-%s-%d", clusterName, compName, i) - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, podName). - SetOwnerReferences("apps/v1", constant.StatefulSetKind, statefulset). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(compName). - AddAppManangedByLabel(). - AddControllerRevisionHashLabel(controllerRevision). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). 
- Create(&testCtx).GetObject() - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = []corev1.PodCondition{{ - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }} - })).Should(Succeed()) - pods = append(pods, pod) - } - if intctrlutil.IsRSMEnabled() { - rsm = testapps.NewRSMFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(3)). - AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). - Create(&testCtx).GetObject() - testk8s.InitRSMStatus(testCtx, rsm, controllerRevision) - } - Expect(testapps.ChangeObjStatus(&testCtx, cluster, func() { - cluster.Status.ObservedGeneration = cluster.Generation - })).Should(Succeed()) - }) - - It("should set component status to failed if container is not ready and have error message", func() { - Expect(mockContainerError(pods[0])).Should(Succeed()) - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - }) - - It("should set component status to running if container is ready", func() { - Expect(testapps.ChangeObjStatus(&testCtx, statefulset, func() { - testk8s.MockStatefulSetReady(statefulset) - })).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, pods...) 
- })).Should(Succeed()) - } - - Expect(setPodRole(pods[0], "leader")).Should(Succeed()) - Expect(setPodRole(pods[1], "follower")).Should(Succeed()) - Expect(setPodRole(pods[2], "follower")).Should(Succeed()) - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - }) - }) - }) - - Context("with replicationset component", func() { - BeforeEach(func() { - clusterDef = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.ReplicationRedisComponent, compDefName). - Create(&testCtx).GetObject() - - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName, clusterVersionName). - AddComponent(compName, compDefName). - SetReplicas(2). - Create(&testCtx).GetObject() - - reqCtx = &intctrlutil.RequestCtx{ - Ctx: ctx, - Log: log.FromContext(ctx).WithValues("cluster", clusterDef.Name), - } - dag = graph.NewDAG() - component, err = NewComponent(*reqCtx, testCtx.Cli, clusterDef, nil, cluster, compName, dag) - Expect(err).Should(Succeed()) - Expect(component).ShouldNot(BeNil()) - }) - - It("should not change component if no deployment or pod exists", func() { - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(BeEmpty()) - }) - - Context("and with mocked statefulset & pod", func() { - const ( - replicas = 2 - ) - var ( - statefulset *appsv1.StatefulSet - pods []*corev1.Pod - ) - - BeforeEach(func() { - stsName := clusterName + "-" + compName - statefulset = testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(replicas)). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). 
- Create(&testCtx).GetObject() - testk8s.InitStatefulSetStatus(testCtx, statefulset, controllerRevision) - for i := 0; i < replicas; i++ { - podName := fmt.Sprintf("%s-%d", stsName, i) - podRole := "primary" - if i > 0 { - podRole = "secondary" - } - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, podName). - SetOwnerReferences("apps/v1", constant.StatefulSetKind, statefulset). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(compName). - AddAppManangedByLabel(). - AddRoleLabel(podRole). - AddControllerRevisionHashLabel(controllerRevision). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). - Create(&testCtx).GetObject() - patch := client.MergeFrom(pod.DeepCopy()) - pod.Status.Conditions = []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - } - Expect(testCtx.Cli.Status().Patch(testCtx.Ctx, pod, patch)).Should(Succeed()) - pods = append(pods, pod) - } - if intctrlutil.IsRSMEnabled() { - rsm = testapps.NewRSMFactory(testCtx.DefaultNamespace, stsName, clusterName, compName). - AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(cluster.Generation, 10)). - SetReplicas(int32(replicas)). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). 
- Create(&testCtx).GetObject() - testk8s.InitRSMStatus(testCtx, rsm, controllerRevision) - } - Expect(testapps.ChangeObjStatus(&testCtx, cluster, func() { - cluster.Status.ObservedGeneration = cluster.Generation - })).Should(Succeed()) - }) - - It("should set component status to failed if container is not ready and have error message", func() { - Expect(mockContainerError(pods[0])).Should(Succeed()) - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - }) - - It("should set component status to running if container is ready", func() { - Expect(testapps.ChangeObjStatus(&testCtx, statefulset, func() { - testk8s.MockStatefulSetReady(statefulset) - })).Should(Succeed()) - if intctrlutil.IsRSMEnabled() { - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, pods...) - })).Should(Succeed()) - } - - Expect(component.Status(*reqCtx, testCtx.Cli)).Should(Succeed()) - Expect(cluster.Status.Components[compName].Phase).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) - }) - }) - }) -}) - -func mockContainerError(pod *corev1.Pod) error { - return testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Reason: "ImagePullBackOff", - Message: "Back-off pulling image", - }, - }, - }, - } - pod.Status.Conditions = []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.NewTime(time.Now().Add(-2 * time.Minute)), - }, - } - }) -} - -func setPodRole(pod *corev1.Pod, role string) error { - return testapps.ChangeObj(&testCtx, pod, func(lpod *corev1.Pod) { - lpod.Labels[constant.RoleLabelKey] = role - }) -} diff --git a/controllers/apps/components/types.go b/controllers/apps/components/types.go index 98278bd3906..338009fb860 100644 --- 
a/controllers/apps/components/types.go +++ b/controllers/apps/components/types.go @@ -20,62 +20,98 @@ along with this program. If not, see . package components import ( - "time" - + "context" + "fmt" + + "github.com/apecloud/kubeblocks/internal/class" + "github.com/apecloud/kubeblocks/internal/constant" + types2 "github.com/apecloud/kubeblocks/internal/controller/client" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/plan" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/controller/component" - ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) -const ( - // ComponentPhaseTransition the event reason indicates that the component transits to a new phase. - ComponentPhaseTransition = "ComponentPhaseTransition" - - // PodContainerFailedTimeout the timeout for container of pod failures, the component phase will be set to Failed/Abnormal after this time. - PodContainerFailedTimeout = 10 * time.Second - - // PodScheduledFailedTimeout timeout for scheduling failure. 
- PodScheduledFailedTimeout = 30 * time.Second -) - type Component interface { GetName() string GetNamespace() string GetClusterName() string - GetDefinitionName() string - GetWorkloadType() appsv1alpha1.WorkloadType GetCluster() *appsv1alpha1.Cluster GetClusterVersion() *appsv1alpha1.ClusterVersion GetSynthesizedComponent() *component.SynthesizedComponent - GetConsensusSpec() *appsv1alpha1.ConsensusSetSpec - - GetMatchingLabels() client.MatchingLabels - - GetPhase() appsv1alpha1.ClusterComponentPhase - // GetStatus() appsv1alpha1.ClusterComponentStatus - - // GetBuiltObjects returns all objects that will be created by this component - GetBuiltObjects(reqCtx intctrlutil.RequestCtx, cli client.Client) ([]client.Object, error) - Create(reqCtx intctrlutil.RequestCtx, cli client.Client) error Delete(reqCtx intctrlutil.RequestCtx, cli client.Client) error Update(reqCtx intctrlutil.RequestCtx, cli client.Client) error Status(reqCtx intctrlutil.RequestCtx, cli client.Client) error +} - Restart(reqCtx intctrlutil.RequestCtx, cli client.Client) error - - ExpandVolume(reqCtx intctrlutil.RequestCtx, cli client.Client) error - - HorizontalScale(reqCtx intctrlutil.RequestCtx, cli client.Client) error - - // TODO(impl): impl-related, replace them with component workload - SetWorkload(obj client.Object, action *ictrltypes.LifecycleAction, parent *ictrltypes.LifecycleVertex) - AddResource(obj client.Object, action *ictrltypes.LifecycleAction, parent *ictrltypes.LifecycleVertex) *ictrltypes.LifecycleVertex +func NewComponent(reqCtx intctrlutil.RequestCtx, + cli client.Client, + definition *appsv1alpha1.ClusterDefinition, + version *appsv1alpha1.ClusterVersion, + cluster *appsv1alpha1.Cluster, + compName string, + dag *graph.DAG) (Component, error) { + var compDef *appsv1alpha1.ClusterComponentDefinition + var compVer *appsv1alpha1.ClusterComponentVersion + compSpec := cluster.Spec.GetComponentByName(compName) + if compSpec != nil { + compDef = 
definition.GetComponentDefByName(compSpec.ComponentDefRef) + if compDef == nil { + return nil, fmt.Errorf("referenced component definition does not exist, cluster: %s, component: %s, component definition ref:%s", + cluster.Name, compSpec.Name, compSpec.ComponentDefRef) + } + if version != nil { + compVer = version.Spec.GetDefNameMappingComponents()[compSpec.ComponentDefRef] + } + } else { + compDef = definition.GetComponentDefByName(compName) + if version != nil { + compVer = version.Spec.GetDefNameMappingComponents()[compName] + } + } + + if compDef == nil { + return nil, nil + } + + clsMgr, err := getClassManager(reqCtx.Ctx, cli, cluster) + if err != nil { + return nil, err + } + serviceReferences, err := plan.GenServiceReferences(reqCtx, cli, cluster, compDef, compSpec) + if err != nil { + return nil, err + } + + synthesizedComp, err := component.BuildComponent(reqCtx, clsMgr, cluster, definition, compDef, compSpec, serviceReferences, compVer) + if err != nil { + return nil, err + } + if synthesizedComp == nil { + return nil, nil + } + + return newRSMComponent(cli, reqCtx.Recorder, cluster, version, synthesizedComp, dag), nil } -type ComponentWorkload interface{} +func getClassManager(ctx context.Context, cli types2.ReadonlyClient, cluster *appsv1alpha1.Cluster) (*class.Manager, error) { + var classDefinitionList appsv1alpha1.ComponentClassDefinitionList + ml := []client.ListOption{ + client.MatchingLabels{constant.ClusterDefLabelKey: cluster.Spec.ClusterDefRef}, + } + if err := cli.List(ctx, &classDefinitionList, ml...); err != nil { + return nil, err + } + + var constraintList appsv1alpha1.ComponentResourceConstraintList + if err := cli.List(ctx, &constraintList); err != nil { + return nil, err + } + return class.NewManager(classDefinitionList, constraintList) +} diff --git a/controllers/apps/components/utils.go b/controllers/apps/components/utils.go index d33462343d2..65ea05f4f8c 100644 --- a/controllers/apps/components/utils.go +++ 
b/controllers/apps/components/utils.go @@ -24,7 +24,6 @@ import ( "errors" "fmt" "reflect" - "sort" "strconv" "strings" "time" @@ -73,14 +72,6 @@ func listRSMOwnedByComponent(ctx context.Context, cli client.Client, namespace s return listObjWithLabelsInNamespace(ctx, cli, generics.RSMSignature, namespace, labels) } -func listStsOwnedByComponent(ctx context.Context, cli client.Client, namespace string, labels client.MatchingLabels) ([]*appsv1.StatefulSet, error) { - return listObjWithLabelsInNamespace(ctx, cli, generics.StatefulSetSignature, namespace, labels) -} - -func listDeployOwnedByComponent(ctx context.Context, cli client.Client, namespace string, labels client.MatchingLabels) ([]*appsv1.Deployment, error) { - return listObjWithLabelsInNamespace(ctx, cli, generics.DeploymentSignature, namespace, labels) -} - func listPodOwnedByComponent(ctx context.Context, cli client.Client, namespace string, labels client.MatchingLabels) ([]*corev1.Pod, error) { return listObjWithLabelsInNamespace(ctx, cli, generics.PodSignature, namespace, labels) } @@ -353,75 +344,6 @@ func getCompRelatedObjectList(ctx context.Context, return podList, nil } -// availableReplicasAreConsistent checks if expected replicas number of component is consistent with -// the number of available workload replicas. -func availableReplicasAreConsistent(componentReplicas, podCount, workloadAvailableReplicas int32) bool { - return workloadAvailableReplicas == componentReplicas && componentReplicas == podCount -} - -// getPhaseWithNoAvailableReplicas gets the component phase when the workload of component has no available replicas. -func getPhaseWithNoAvailableReplicas(componentReplicas int32) appsv1alpha1.ClusterComponentPhase { - if componentReplicas == 0 { - return "" - } - return appsv1alpha1.FailedClusterCompPhase -} - -// getComponentPhaseWhenPodsNotReady gets the component phase when pods of component are not ready. 
-func getComponentPhaseWhenPodsNotReady(podList *corev1.PodList, - workload metav1.Object, - componentReplicas, - availableReplicas int32, - checkLeaderIsReady func(pod *corev1.Pod, workload metav1.Object) bool, - checkFailedPodRevision func(pod *corev1.Pod, workload metav1.Object) bool) appsv1alpha1.ClusterComponentPhase { - podCount := len(podList.Items) - if podCount == 0 || availableReplicas == 0 { - return getPhaseWithNoAvailableReplicas(componentReplicas) - } - var ( - existLatestRevisionFailedPod bool - leaderIsReady bool - ) - for _, v := range podList.Items { - // if the pod is terminating, ignore it - if v.DeletionTimestamp != nil { - return "" - } - if checkLeaderIsReady == nil || checkLeaderIsReady(&v, workload) { - leaderIsReady = true - } - if checkFailedPodRevision != nil && checkFailedPodRevision(&v, workload) { - existLatestRevisionFailedPod = true - } - } - return getCompPhaseByConditions(existLatestRevisionFailedPod, leaderIsReady, - componentReplicas, int32(podCount), availableReplicas) -} - -// getCompPhaseByConditions gets the component phase according to the following conditions: -// 1. if the failed pod is not controlled by the latest revision, ignore it. -// 2. if the primary replicas are not available, the component is failed. -// 3. finally if expected replicas number of component is inconsistent with -// the number of available workload replicas, the component is abnormal. -func getCompPhaseByConditions(existLatestRevisionFailedPod bool, - primaryReplicasAvailable bool, - compReplicas, - podCount, - availableReplicas int32) appsv1alpha1.ClusterComponentPhase { - // if the failed pod is not controlled by the latest revision, ignore it. - if !existLatestRevisionFailedPod { - return "" - } - if !primaryReplicasAvailable { - return appsv1alpha1.FailedClusterCompPhase - } - // checks if expected replicas number of component is consistent with the number of available workload replicas. 
- if !availableReplicasAreConsistent(compReplicas, podCount, availableReplicas) { - return appsv1alpha1.AbnormalClusterCompPhase - } - return "" -} - // parseCustomLabelPattern parses the custom label pattern to GroupVersionKind. func parseCustomLabelPattern(pattern string) (schema.GroupVersionKind, error) { patterns := strings.Split(pattern, "/") @@ -442,37 +364,12 @@ func parseCustomLabelPattern(pattern string) (schema.GroupVersionKind, error) { return schema.GroupVersionKind{}, fmt.Errorf("invalid pattern %s", pattern) } -// SortPods sorts pods by their role priority -func SortPods(pods []corev1.Pod, priorityMap map[string]int, idLabelKey string) { - // make a Serial pod list, - // e.g.: unknown -> empty -> learner -> follower1 -> follower2 -> leader, with follower1.Name < follower2.Name - sort.SliceStable(pods, func(i, j int) bool { - roleI := pods[i].Labels[idLabelKey] - roleJ := pods[j].Labels[idLabelKey] - if priorityMap[roleI] == priorityMap[roleJ] { - _, ordinal1 := intctrlutil.GetParentNameAndOrdinal(&pods[i]) - _, ordinal2 := intctrlutil.GetParentNameAndOrdinal(&pods[j]) - return ordinal1 < ordinal2 - } - return priorityMap[roleI] < priorityMap[roleJ] - }) -} - // replaceKBEnvPlaceholderTokens replaces the placeholder tokens in the string strToReplace with builtInEnvMap and return new string. func replaceKBEnvPlaceholderTokens(clusterName, uid, componentName, strToReplace string) string { builtInEnvMap := componentutil.GetReplacementMapForBuiltInEnv(clusterName, uid, componentName) return componentutil.ReplaceNamedVars(builtInEnvMap, strToReplace, -1, true) } -// getRunningPods gets the running pods of the specified statefulSet. 
-func getRunningPods(ctx context.Context, cli client.Client, obj client.Object) ([]corev1.Pod, error) { - sts := convertToStatefulSet(obj) - if sts == nil || sts.Generation != sts.Status.ObservedGeneration { - return nil, nil - } - return GetPodListByStatefulSet(ctx, cli, sts) -} - // resolvePodSpecDefaultFields set default value for some known fields of proto PodSpec @pobj. func resolvePodSpecDefaultFields(obj corev1.PodSpec, pobj *corev1.PodSpec) { resolveVolume := func(v corev1.Volume, vv *corev1.Volume) { diff --git a/controllers/apps/components/utils_test.go b/controllers/apps/components/utils_test.go index 4571e6f612d..7c1b6ee4857 100644 --- a/controllers/apps/components/utils_test.go +++ b/controllers/apps/components/utils_test.go @@ -40,10 +40,8 @@ import ( "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/graph" "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" ) func TestIsFailedOrAbnormal(t *testing.T) { @@ -65,82 +63,7 @@ func TestIsProbeTimeout(t *testing.T) { } } -func TestGetComponentPhase(t *testing.T) { - var ( - isFailed = true - isAbnormal = true - ) - getComponentPhase := func(isFailed, isAbnormal bool) appsv1alpha1.ClusterComponentPhase { - var componentPhase appsv1alpha1.ClusterComponentPhase - if isFailed { - componentPhase = appsv1alpha1.FailedClusterCompPhase - } else if isAbnormal { - componentPhase = appsv1alpha1.AbnormalClusterCompPhase - } - return componentPhase - } - status := getComponentPhase(isFailed, isAbnormal) - if status != appsv1alpha1.FailedClusterCompPhase { - t.Error("function getComponentPhase should return Failed") - } - isFailed = false - status = getComponentPhase(isFailed, isAbnormal) - if status != 
appsv1alpha1.AbnormalClusterCompPhase { - t.Error("function getComponentPhase should return Abnormal") - } - isAbnormal = false - status = getComponentPhase(isFailed, isAbnormal) - if status != "" { - t.Error(`function getComponentPhase should return ""`) - } -} - -func TestGetPhaseWithNoAvailableReplicas(t *testing.T) { - status := getPhaseWithNoAvailableReplicas(int32(0)) - if status != "" { - t.Error(`function getComponentPhase should return ""`) - } - status = getPhaseWithNoAvailableReplicas(int32(2)) - if status != appsv1alpha1.FailedClusterCompPhase { - t.Error(`function getComponentPhase should return "Failed"`) - } -} - -func TestAvailableReplicasAreConsistent(t *testing.T) { - isConsistent := availableReplicasAreConsistent(int32(1), int32(1), int32(1)) - if !isConsistent { - t.Error(`function getComponentPhase should return "true"`) - } - isConsistent = availableReplicasAreConsistent(int32(1), int32(2), int32(1)) - if isConsistent { - t.Error(`function getComponentPhase should return "false"`) - } -} - -func TestGetCompPhaseByConditions(t *testing.T) { - existLatestRevisionFailedPod := true - primaryReplicaIsReady := true - phase := getCompPhaseByConditions(existLatestRevisionFailedPod, primaryReplicaIsReady, int32(1), int32(1), int32(1)) - if phase != "" { - t.Error(`function getComponentPhase should return ""`) - } - phase = getCompPhaseByConditions(existLatestRevisionFailedPod, primaryReplicaIsReady, int32(2), int32(1), int32(1)) - if phase != appsv1alpha1.AbnormalClusterCompPhase { - t.Error(`function getComponentPhase should return "Abnormal"`) - } - primaryReplicaIsReady = false - phase = getCompPhaseByConditions(existLatestRevisionFailedPod, primaryReplicaIsReady, int32(2), int32(1), int32(1)) - if phase != appsv1alpha1.FailedClusterCompPhase { - t.Error(`function getComponentPhase should return "Failed"`) - } - existLatestRevisionFailedPod = false - phase = getCompPhaseByConditions(existLatestRevisionFailedPod, primaryReplicaIsReady, int32(2), 
int32(1), int32(1)) - if phase != "" { - t.Error(`function getComponentPhase should return ""`) - } -} - -var _ = Describe("Consensus Component", func() { +var _ = Describe("Component", func() { var ( randomStr = testCtx.GetRandomStr() clusterDefName = "mysql-clusterdef-" + randomStr @@ -175,8 +98,8 @@ var _ = Describe("Consensus Component", func() { AfterEach(cleanAll) - Context("Consensus Component test", func() { - It("Consensus Component test", func() { + Context("Component test", func() { + It("Component test", func() { By(" init cluster, statefulSet, pods") _, _, cluster := testapps.InitClusterWithHybridComps(&testCtx, clusterDefName, clusterVersionName, clusterName, statelessCompName, "stateful", consensusCompName) @@ -246,38 +169,6 @@ var _ = Describe("Consensus Component", func() { delete(podNoLabel.Labels, constant.KBAppComponentLabelKey) _, _, err = GetComponentInfoByPod(ctx, k8sClient, *cluster, podNoLabel) Expect(err).ShouldNot(Succeed()) - - By("test getComponentPhaseWhenPodsNotReady function") - consensusComp := cluster.Spec.GetComponentByName(consensusCompName) - checkExistFailedPodOfLatestRevision := func(pod *corev1.Pod, workload metav1.Object) bool { - sts := workload.(*appsv1.StatefulSet) - return !intctrlutil.PodIsReady(pod) && intctrlutil.PodIsControlledByLatestRevision(pod, sts) - } - // component phase should be Failed when available replicas is 0 - phase := getComponentPhaseWhenPodsNotReady(podList, sts, consensusComp.Replicas, - sts.Status.AvailableReplicas, nil, checkExistFailedPodOfLatestRevision) - Expect(phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase)) - - // mock available replicas to component replicas - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - testk8s.MockStatefulSetReady(sts) - })).Should(Succeed()) - phase = getComponentPhaseWhenPodsNotReady(podList, sts, consensusComp.Replicas, - sts.Status.AvailableReplicas, nil, checkExistFailedPodOfLatestRevision) - Expect(len(phase) == 0).Should(BeTrue()) - - // mock 
component is abnormal - pod := &podList.Items[0] - Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { - pod.Status.Conditions = nil - })).Should(Succeed()) - Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { - sts.Status.AvailableReplicas = *sts.Spec.Replicas - 1 - })).Should(Succeed()) - phase = getComponentPhaseWhenPodsNotReady(podList, sts, consensusComp.Replicas, - sts.Status.AvailableReplicas, nil, checkExistFailedPodOfLatestRevision) - Expect(phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) - }) It("test GetComponentInfoByPod with no cluster componentSpec", func() { diff --git a/controllers/apps/configuration/parallel_upgrade_policy.go b/controllers/apps/configuration/parallel_upgrade_policy.go index 1a8d5045588..951b75df8e7 100644 --- a/controllers/apps/configuration/parallel_upgrade_policy.go +++ b/controllers/apps/configuration/parallel_upgrade_policy.go @@ -20,10 +20,10 @@ along with this program. If not, see . package configuration import ( + cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" corev1 "k8s.io/api/core/v1" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" podutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -38,14 +38,10 @@ func (p *parallelUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatu var funcs RollingUpgradeFuncs switch params.WorkloadType() { + case appsv1alpha1.Consensus, appsv1alpha1.Stateful, appsv1alpha1.Replication: + funcs = GetRSMRollingUpgradeFuncs() default: return makeReturnedStatus(ESNotSupport), cfgcore.MakeError("not supported component workload type[%s]", params.WorkloadType()) - case appsv1alpha1.Consensus: - funcs = GetConsensusRollingUpgradeFuncs() - case appsv1alpha1.Stateful: - funcs = GetStatefulSetRollingUpgradeFuncs() - case appsv1alpha1.Replication: - funcs = GetReplicationRollingUpgradeFuncs() } pods, err := funcs.GetPodsFunc(params) diff --git 
a/controllers/apps/configuration/policy_util.go b/controllers/apps/configuration/policy_util.go index 4f054c325bb..10f3cacc91d 100644 --- a/controllers/apps/configuration/policy_util.go +++ b/controllers/apps/configuration/policy_util.go @@ -23,7 +23,6 @@ import ( "context" "fmt" "net" - "sort" "strconv" appv1 "k8s.io/api/apps/v1" @@ -31,9 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/apecloud/kubeblocks/controllers/apps/components" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/configuration/core" cfgproto "github.com/apecloud/kubeblocks/internal/configuration/proto" "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/rsm" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -57,7 +58,7 @@ func getReplicationSetPods(params reconfigureParams) ([]corev1.Pod, error) { func GetComponentPods(params reconfigureParams) ([]corev1.Pod, error) { componentPods := make([]corev1.Pod, 0) for i := range params.ComponentUnits { - pods, err := components.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, ¶ms.ComponentUnits[i]) + pods, err := common.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, ¶ms.ComponentUnits[i]) if err != nil { return nil, err } @@ -82,28 +83,9 @@ func CheckReconfigureUpdateProgress(pods []corev1.Pod, configKey, version string return readyPods } -func getStatefulSetPods(params reconfigureParams) ([]corev1.Pod, error) { - if len(params.ComponentUnits) != 1 { - return nil, core.MakeError("statefulSet component require only one statefulset, actual %d components", len(params.ComponentUnits)) - } - - stsObj := ¶ms.ComponentUnits[0] - pods, err := components.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, stsObj) - if err != nil { - return nil, err - } - - sort.SliceStable(pods, func(i, j int) bool { - _, ordinal1 := 
intctrlutil.GetParentNameAndOrdinal(&pods[i]) - _, ordinal2 := intctrlutil.GetParentNameAndOrdinal(&pods[j]) - return ordinal1 < ordinal2 - }) - return pods, nil -} - -func getConsensusPods(params reconfigureParams) ([]corev1.Pod, error) { +func getRSMPods(params reconfigureParams) ([]corev1.Pod, error) { if len(params.ComponentUnits) > 1 { - return nil, core.MakeError("consensus component require only one statefulset, actual %d components", len(params.ComponentUnits)) + return nil, core.MakeError("rsm component require only one statefulset, actual %d components", len(params.ComponentUnits)) } if len(params.ComponentUnits) == 0 { @@ -111,18 +93,16 @@ func getConsensusPods(params reconfigureParams) ([]corev1.Pod, error) { } stsObj := ¶ms.ComponentUnits[0] - pods, err := components.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, stsObj) + pods, err := common.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, stsObj) if err != nil { return nil, err } // TODO: should resolve the dependency on consensus module - components.SortPods(pods, components.ComposeRolePriorityMap(params.Component.ConsensusSpec), constant.RoleLabelKey) - r := make([]corev1.Pod, 0, len(pods)) - for i := len(pods); i > 0; i-- { - r = append(r, pods[i-1:i]...) 
+ if params.Component.RSMSpec != nil { + rsm.SortPods(pods, rsm.ComposeRolePriorityMap(params.Component.RSMSpec.Roles), true) } - return r, nil + return pods, nil } // TODO commonOnlineUpdateWithPod migrate to sql command pipeline diff --git a/controllers/apps/configuration/policy_util_test.go b/controllers/apps/configuration/policy_util_test.go index cc07a086c4a..e7635f9e530 100644 --- a/controllers/apps/configuration/policy_util_test.go +++ b/controllers/apps/configuration/policy_util_test.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -91,6 +92,9 @@ func newMockStatefulSet(replicas int, name string, labels map[string]string) app UID: types.UID(uid), }, Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, Replicas: func() *int32 { i := int32(replicas); return &i }(), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -199,14 +203,20 @@ func withCDComponent(compType appsv1alpha1.WorkloadType, tpls []appsv1alpha1.Com WorkloadType: compType, Name: string(compType), } - if compType == appsv1alpha1.Consensus { - params.Component.ConsensusSpec = &appsv1alpha1.ConsensusSetSpec{ - Leader: appsv1alpha1.ConsensusMember{ - Name: "leader", - }, - Followers: []appsv1alpha1.ConsensusMember{ + if compType == appsv1alpha1.Consensus || compType == appsv1alpha1.Replication { + params.Component.RSMSpec = &appsv1alpha1.RSMSpec{ + Roles: []workloads.ReplicaRole{ + { + Name: "leader", + IsLeader: true, + AccessMode: workloads.ReadWriteMode, + CanVote: true, + }, { - Name: "follower", + Name: "follower", + IsLeader: false, + AccessMode: workloads.ReadonlyMode, + CanVote: true, }, }, } diff --git 
a/controllers/apps/configuration/rolling_upgrade_policy.go b/controllers/apps/configuration/rolling_upgrade_policy.go index 14c4ba8d8e2..37ec806a884 100644 --- a/controllers/apps/configuration/rolling_upgrade_policy.go +++ b/controllers/apps/configuration/rolling_upgrade_policy.go @@ -53,18 +53,13 @@ func init() { } func (r *rollingUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { - var ( - funcs RollingUpgradeFuncs - cType = params.WorkloadType() - ) + var funcs RollingUpgradeFuncs - switch cType { - case appsv1alpha1.Consensus: - funcs = GetConsensusRollingUpgradeFuncs() - case appsv1alpha1.Stateful: - funcs = GetStatefulSetRollingUpgradeFuncs() + switch params.WorkloadType() { + case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: + funcs = GetRSMRollingUpgradeFuncs() default: - return makeReturnedStatus(ESNotSupport), cfgcore.MakeError("not supported component workload type[%s]", cType) + return makeReturnedStatus(ESNotSupport), cfgcore.MakeError("not supported component workload type[%s]", params.WorkloadType()) } return performRollingUpgrade(params, funcs) } diff --git a/controllers/apps/configuration/simple_policy.go b/controllers/apps/configuration/simple_policy.go index 3933db346eb..17800055735 100644 --- a/controllers/apps/configuration/simple_policy.go +++ b/controllers/apps/configuration/simple_policy.go @@ -43,14 +43,8 @@ func (s *simplePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) switch params.WorkloadType() { default: return makeReturnedStatus(ESNotSupport), core.MakeError("not supported component workload type:[%s]", params.WorkloadType()) - case appsv1alpha1.Consensus: - funcs = GetConsensusRollingUpgradeFuncs() - compLists = fromStatefulSetObjects(params.ComponentUnits) - case appsv1alpha1.Stateful: - funcs = GetStatefulSetRollingUpgradeFuncs() - compLists = fromStatefulSetObjects(params.ComponentUnits) - case appsv1alpha1.Replication: - funcs = 
GetReplicationRollingUpgradeFuncs() + case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: + funcs = GetRSMRollingUpgradeFuncs() compLists = fromStatefulSetObjects(params.ComponentUnits) case appsv1alpha1.Stateless: funcs = GetDeploymentRollingUpgradeFuncs() diff --git a/controllers/apps/configuration/sync_upgrade_policy.go b/controllers/apps/configuration/sync_upgrade_policy.go index 1404024c32b..ba8270a5d6a 100644 --- a/controllers/apps/configuration/sync_upgrade_policy.go +++ b/controllers/apps/configuration/sync_upgrade_policy.go @@ -59,12 +59,8 @@ func (o *syncPolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { return makeReturnedStatus(ESNotSupport), core.MakeError("not support component workload type[%s]", params.WorkloadType()) case appsv1alpha1.Stateless: funcs = GetDeploymentRollingUpgradeFuncs() - case appsv1alpha1.Consensus: - funcs = GetConsensusRollingUpgradeFuncs() - case appsv1alpha1.Stateful: - funcs = GetStatefulSetRollingUpgradeFuncs() - case appsv1alpha1.Replication: - funcs = GetReplicationRollingUpgradeFuncs() + case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: + funcs = GetRSMRollingUpgradeFuncs() } pods, err := funcs.GetPodsFunc(params) diff --git a/controllers/apps/configuration/types.go b/controllers/apps/configuration/types.go index 4afaa420afc..67d17c7b3ad 100644 --- a/controllers/apps/configuration/types.go +++ b/controllers/apps/configuration/types.go @@ -44,27 +44,9 @@ type RollingUpgradeFuncs struct { RestartComponent RestartComponent } -func GetConsensusRollingUpgradeFuncs() RollingUpgradeFuncs { +func GetRSMRollingUpgradeFuncs() RollingUpgradeFuncs { return RollingUpgradeFuncs{ - GetPodsFunc: getConsensusPods, - RestartContainerFunc: commonStopContainerWithPod, - OnlineUpdatePodFunc: commonOnlineUpdateWithPod, - RestartComponent: restartStatefulComponent, - } -} - -func GetStatefulSetRollingUpgradeFuncs() RollingUpgradeFuncs { - return RollingUpgradeFuncs{ - 
GetPodsFunc: getStatefulSetPods, - RestartContainerFunc: commonStopContainerWithPod, - OnlineUpdatePodFunc: commonOnlineUpdateWithPod, - RestartComponent: restartStatefulComponent, - } -} - -func GetReplicationRollingUpgradeFuncs() RollingUpgradeFuncs { - return RollingUpgradeFuncs{ - GetPodsFunc: getReplicationSetPods, + GetPodsFunc: getRSMPods, RestartContainerFunc: commonStopContainerWithPod, OnlineUpdatePodFunc: commonOnlineUpdateWithPod, RestartComponent: restartStatefulComponent, diff --git a/controllers/apps/operations/ops_progress_util.go b/controllers/apps/operations/ops_progress_util.go index 0ff1112d2a7..d5fd0b3922d 100644 --- a/controllers/apps/operations/ops_progress_util.go +++ b/controllers/apps/operations/ops_progress_util.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + "k8s.io/kubectl/pkg/util/podutils" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -292,7 +293,7 @@ func handleCancelProgressForPodsRollingUpdate( objectKey := getProgressObjectKey(pod.Kind, pod.Name) progressDetail := appsv1alpha1.ProgressStatusDetail{ObjectKey: objectKey} if !pod.CreationTimestamp.Before(&opsCancelTime) && - components.PodIsAvailable(workloadType, &pod, minReadySeconds) { + podIsAvailable(workloadType, &pod, minReadySeconds) { completedCount += 1 handleSucceedProgressDetail(opsRes, pgRes, compStatus, progressDetail) continue @@ -305,6 +306,20 @@ func handleCancelProgressForPodsRollingUpdate( return completedCount } +func podIsAvailable(workloadType appsv1alpha1.WorkloadType, pod *corev1.Pod, minReadySeconds int32) bool { + if pod == nil { + return false + } + switch workloadType { + case appsv1alpha1.Consensus, appsv1alpha1.Replication: + return intctrlutil.PodIsReadyWithLabel(*pod) + case appsv1alpha1.Stateful, appsv1alpha1.Stateless: + return podutils.IsPodAvailable(pod, minReadySeconds, metav1.Time{Time: 
time.Now()}) + default: + panic("unknown workload type") + } +} + // handlePendingProgressDetail handles the pending progressDetail and sets it to progressDetails. func handlePendingProgressDetail(opsRes *OpsResource, compStatus *appsv1alpha1.OpsRequestComponentStatus, @@ -379,7 +394,7 @@ func podProcessedSuccessful(workloadType appsv1alpha1.WorkloadType, minReadySeconds int32, componentPhase appsv1alpha1.ClusterComponentPhase, opsIsCompleted bool) bool { - if !components.PodIsAvailable(workloadType, pod, minReadySeconds) { + if !podIsAvailable(workloadType, pod, minReadySeconds) { return false } return (opsIsCompleted && componentPhase == appsv1alpha1.RunningClusterCompPhase) || !pod.CreationTimestamp.Before(&opsStartTime) @@ -506,7 +521,7 @@ func handleScaleOutProgress(reqCtx intctrlutil.RequestCtx, objectKey := getProgressObjectKey(v.Kind, v.Name) progressDetail := appsv1alpha1.ProgressStatusDetail{ObjectKey: objectKey} pgRes.opsMessageKey = "create" - if components.PodIsAvailable(workloadType, &v, minReadySeconds) { + if podIsAvailable(workloadType, &v, minReadySeconds) { completedCount += 1 handleSucceedProgressDetail(opsRes, pgRes, compStatus, progressDetail) continue @@ -572,7 +587,7 @@ func handleScaleDownProgress( } // handle the re-created pods if these pods are failed before doing horizontal scaling. 
pgRes.opsMessageKey = "re-create" - if components.PodIsAvailable(workloadType, &pod, minReadySeconds) { + if podIsAvailable(workloadType, &pod, minReadySeconds) { completedCount += 1 handleSucceedProgressDetail(opsRes, pgRes, compStatus, progressDetail) continue diff --git a/controllers/apps/operations/switchover_util.go b/controllers/apps/operations/switchover_util.go index 28b409e6c36..9c75c3a08d3 100644 --- a/controllers/apps/operations/switchover_util.go +++ b/controllers/apps/operations/switchover_util.go @@ -35,6 +35,7 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps/components" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/constant" intctrlcomputil "github.com/apecloud/kubeblocks/internal/controller/component" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" @@ -69,8 +70,8 @@ func needDoSwitchover(ctx context.Context, if err != nil { return false, err } - podParent, _ := components.ParseParentNameAndOrdinal(pod.Name) - siParent, o := components.ParseParentNameAndOrdinal(switchover.InstanceName) + podParent, _ := common.ParseParentNameAndOrdinal(pod.Name) + siParent, o := common.ParseParentNameAndOrdinal(switchover.InstanceName) if podParent != siParent || o < 0 || o >= int32(len(podList.Items)) { return false, errors.New("switchover.InstanceName is invalid") } diff --git a/controllers/apps/opsrequest_controller_test.go b/controllers/apps/opsrequest_controller_test.go index b588eb823db..8e7f52ac22c 100644 --- a/controllers/apps/opsrequest_controller_test.go +++ b/controllers/apps/opsrequest_controller_test.go @@ -41,7 +41,6 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" opsutil "github.com/apecloud/kubeblocks/controllers/apps/operations/util" "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controllerutil" intctrlutil 
"github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" @@ -181,24 +180,16 @@ var _ = Describe("OpsRequest Controller", func() { } var mysqlSts *appsv1.StatefulSet var mysqlRSM *workloads.ReplicatedStateMachine - if controllerutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) - mysqlRSM = &rsmList.Items[0] - mysqlSts = testapps.NewStatefulSetFactory(mysqlRSM.Namespace, mysqlRSM.Name, clusterKey.Name, mysqlCompName). - SetReplicas(*mysqlRSM.Spec.Replicas).Create(&testCtx).GetObject() - Expect(testapps.ChangeObjStatus(&testCtx, mysqlSts, func() { - testk8s.MockStatefulSetReady(mysqlSts) - })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, mysqlRSM, func() { - testk8s.MockRSMReady(mysqlRSM, pod) - })).ShouldNot(HaveOccurred()) - } else { - stsList := testk8s.ListAndCheckStatefulSetWithComponent(&testCtx, clusterKey, mysqlCompName) - mysqlSts = &stsList.Items[0] - Expect(testapps.ChangeObjStatus(&testCtx, mysqlSts, func() { - testk8s.MockStatefulSetReady(mysqlSts) - })).ShouldNot(HaveOccurred()) - } + rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) + mysqlRSM = &rsmList.Items[0] + mysqlSts = testapps.NewStatefulSetFactory(mysqlRSM.Namespace, mysqlRSM.Name, clusterKey.Name, mysqlCompName). 
+ SetReplicas(*mysqlRSM.Spec.Replicas).Create(&testCtx).GetObject() + Expect(testapps.ChangeObjStatus(&testCtx, mysqlSts, func() { + testk8s.MockStatefulSetReady(mysqlSts) + })).ShouldNot(HaveOccurred()) + Expect(testapps.ChangeObjStatus(&testCtx, mysqlRSM, func() { + testk8s.MockRSMReady(mysqlRSM, pod) + })).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) By("send VerticalScalingOpsRequest successfully") @@ -238,15 +229,9 @@ var _ = Describe("OpsRequest Controller", func() { // })).Should(Succeed()) By("mock bring Cluster and changed component back to running status") - if controllerutil.IsRSMEnabled() { - Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(mysqlRSM), func(tmpRSM *workloads.ReplicatedStateMachine) { - testk8s.MockRSMReady(tmpRSM, pod) - })()).ShouldNot(HaveOccurred()) - } else { - Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(mysqlSts), func(tmpSts *appsv1.StatefulSet) { - testk8s.MockStatefulSetReady(tmpSts) - })()).ShouldNot(HaveOccurred()) - } + Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(mysqlRSM), func(tmpRSM *workloads.ReplicatedStateMachine) { + testk8s.MockRSMReady(tmpRSM, pod) + })()).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, mysqlCompName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) // checkLatestOpsHasProcessed(clusterKey) @@ -273,15 +258,9 @@ var _ = Describe("OpsRequest Controller", func() { targetRequests = scalingCtx.target.resource.Requests } - if controllerutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) - mysqlRSM = &rsmList.Items[0] - Expect(reflect.DeepEqual(mysqlRSM.Spec.Template.Spec.Containers[0].Resources.Requests, 
targetRequests)).Should(BeTrue()) - } else { - stsList := testk8s.ListAndCheckStatefulSetWithComponent(&testCtx, clusterKey, mysqlCompName) - mysqlSts = &stsList.Items[0] - Expect(reflect.DeepEqual(mysqlSts.Spec.Template.Spec.Containers[0].Resources.Requests, targetRequests)).Should(BeTrue()) - } + rsmList = testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) + mysqlRSM = &rsmList.Items[0] + Expect(reflect.DeepEqual(mysqlRSM.Spec.Template.Spec.Containers[0].Resources.Requests, targetRequests)).Should(BeTrue()) By("check OpsRequest reclaimed after ttl") Expect(testapps.ChangeObj(&testCtx, verticalScalingOpsRequest, func(lopsReq *appsv1alpha1.OpsRequest) { @@ -358,32 +337,22 @@ var _ = Describe("OpsRequest Controller", func() { }) componentWorkload := func() client.Object { - if controllerutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) - return &rsmList.Items[0] - } - stsList := testk8s.ListAndCheckStatefulSetWithComponent(&testCtx, clusterKey, mysqlCompName) - return &stsList.Items[0] + rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) + return &rsmList.Items[0] } mockCompRunning := func(replicas int32) { - var sts *appsv1.StatefulSet wl := componentWorkload() - if controllerutil.IsRSMEnabled() { - rsm, _ := wl.(*workloads.ReplicatedStateMachine) - sts = testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, mysqlCompName). - SetReplicas(*rsm.Spec.Replicas).GetObject() - testapps.CheckedCreateK8sResource(&testCtx, sts) - } else { - sts, _ = wl.(*appsv1.StatefulSet) - } + rsm, _ := wl.(*workloads.ReplicatedStateMachine) + sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, mysqlCompName). 
+ SetReplicas(*rsm.Spec.Replicas).GetObject() + testapps.CheckedCreateK8sResource(&testCtx, sts) + mockPods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, mysqlCompName) - if controllerutil.IsRSMEnabled() { - rsm, _ := wl.(*workloads.ReplicatedStateMachine) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) - })).ShouldNot(HaveOccurred()) - } + rsm, _ = wl.(*workloads.ReplicatedStateMachine) + Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { + testk8s.MockRSMReady(rsm, mockPods...) + })).ShouldNot(HaveOccurred()) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) @@ -548,16 +517,14 @@ var _ = Describe("OpsRequest Controller", func() { Eventually(testapps.CheckObjExists(&testCtx, backupKey, vs, true)).Should(Succeed()) By("check the underlying workload been updated") - if controllerutil.IsRSMEnabled() { - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), - func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) - })).Should(Succeed()) - rsm := componentWorkload() - Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { - sts.Spec.Replicas = &replicas + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), + func(g Gomega, rsm *workloads.ReplicatedStateMachine) { + g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) })).Should(Succeed()) - } + rsm := componentWorkload() + Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { + sts.Spec.Replicas = &replicas + })).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), func(g Gomega, sts *appsv1.StatefulSet) { g.Expect(*sts.Spec.Replicas).Should(Equal(replicas)) @@ -629,16 +596,14 @@ var 
_ = Describe("OpsRequest Controller", func() { })).Should(Succeed()) By("check the underlying workload been updated") - if controllerutil.IsRSMEnabled() { - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), - func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) - })).Should(Succeed()) - rsm := componentWorkload() - Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { - sts.Spec.Replicas = &replicas + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), + func(g Gomega, rsm *workloads.ReplicatedStateMachine) { + g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) })).Should(Succeed()) - } + rsm := componentWorkload() + Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { + sts.Spec.Replicas = &replicas + })).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), func(g Gomega, sts *appsv1.StatefulSet) { g.Expect(*sts.Spec.Replicas).Should(Equal(replicas)) diff --git a/controllers/apps/tls_utils_test.go b/controllers/apps/tls_utils_test.go index 4ac61fed483..beb56273bd5 100644 --- a/controllers/apps/tls_utils_test.go +++ b/controllers/apps/tls_utils_test.go @@ -26,7 +26,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" @@ -37,7 +36,6 @@ import ( cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/plan" - "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" @@ -298,14 +296,9 @@ var _ = Describe("TLS self-signed cert function", func() { Eventually(k8sClient.Get(ctx, clusterKey, clusterObj)).Should(Succeed()) Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(1)) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.CreatingClusterPhase)) - var sts appsv1.StatefulSet - if controllerutil.IsRSMEnabled() { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - sts = *components.ConvertRSMToSTS(&rsmList.Items[0]) - } else { - stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) - sts = stsList.Items[0] - } + + rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) + sts := *components.ConvertRSMToSTS(&rsmList.Items[0]) cd := &appsv1alpha1.ClusterDefinition{} Expect(k8sClient.Get(ctx, types.NamespacedName{Name: clusterDefName, Namespace: testCtx.DefaultNamespace}, cd)).Should(Succeed()) cmName := cfgcore.GetInstanceCMName(&sts, &cd.Spec.ComponentDefs[0].ConfigSpecs[0].ComponentTemplateSpec) diff --git a/controllers/apps/transformer_cluster_deletion.go b/controllers/apps/transformer_cluster_deletion.go index c7715c8a598..5a812ad5847 100644 --- a/controllers/apps/transformer_cluster_deletion.go +++ b/controllers/apps/transformer_cluster_deletion.go @@ -24,7 +24,6 @@ import ( "strings" "time" - appsv1 "k8s.io/api/apps/v1" batchv1 
"k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -39,7 +38,6 @@ import ( "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/graph" ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) // ClusterDeletionTransformer handles cluster deletion @@ -201,11 +199,8 @@ func kindsForHalt() ([]client.ObjectList, []client.ObjectList) { nonNamespacedKindsPlus := []client.ObjectList{ &rbacv1.ClusterRoleBindingList{}, } - if intctrlutil.IsRSMEnabled() { - namespacedKindsPlus = append(namespacedKindsPlus, &workloads.ReplicatedStateMachineList{}) - } else { - namespacedKindsPlus = append(namespacedKindsPlus, &corev1.ServiceList{}, &appsv1.StatefulSetList{}, &appsv1.DeploymentList{}) - } + namespacedKindsPlus = append(namespacedKindsPlus, &workloads.ReplicatedStateMachineList{}) + return append(namespacedKinds, namespacedKindsPlus...), append(nonNamespacedKinds, nonNamespacedKindsPlus...) } diff --git a/controllers/k8score/const.go b/controllers/k8score/const.go index d47432f46de..c07ce34506a 100644 --- a/controllers/k8score/const.go +++ b/controllers/k8score/const.go @@ -22,15 +22,4 @@ package k8score const ( // roleChangedAnnotKey is used to mark the role change event has been handled. 
roleChangedAnnotKey = "role.kubeblocks.io/event-handled" - - // TrueStr values - trueStr = "true" -) - -const ( - ProbeEventOperationNotImpl ProbeEventType = "OperationNotImplemented" - ProbeEventCheckRoleFailed ProbeEventType = "Failed" - ProbeEventRoleInvalid ProbeEventType = "roleInvalid" - ProbeEventRoleChanged ProbeEventType = "roleChanged" - ProbeEventRoleUnChanged ProbeEventType = "roleUnChanged" ) diff --git a/controllers/k8score/role_change_event_handler.go b/controllers/k8score/role_change_event_handler.go deleted file mode 100644 index dd7db82781a..00000000000 --- a/controllers/k8score/role_change_event_handler.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package k8score - -import ( - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/controllers/apps/components" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - lorryutil "github.com/apecloud/kubeblocks/lorry/util" -) - -// RoleChangeEventHandler is the event handler for the role change event -type RoleChangeEventHandler struct{} - -var _ EventHandler = &RoleChangeEventHandler{} - -// var term int - -// Handle handles role changed event. -func (r *RoleChangeEventHandler) Handle(cli client.Client, reqCtx intctrlutil.RequestCtx, recorder record.EventRecorder, event *corev1.Event) error { - if event.Reason != string(lorryutil.CheckRoleOperation) { - return nil - } - var ( - err error - annotations = event.GetAnnotations() - ) - // filter role changed event that has been handled - if annotations != nil && annotations[roleChangedAnnotKey] == trueStr { - return nil - } - - if _, err = handleRoleChangedEvent(cli, reqCtx, recorder, event); err != nil { - return err - } - - // event order is crucial in role probing, but it's not guaranteed when controller restarted, so we have to mark them to be filtered - patch := client.MergeFrom(event.DeepCopy()) - if event.Annotations == nil { - event.Annotations = make(map[string]string, 0) - } - event.Annotations[roleChangedAnnotKey] = trueStr - return cli.Patch(reqCtx.Ctx, event, patch) -} - -// handleRoleChangedEvent handles role changed event and return role. 
-func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, recorder record.EventRecorder, event *corev1.Event) (string, error) { - // parse probe event message - message := ParseProbeEventMessage(reqCtx, event) - if message == nil { - reqCtx.Log.Info("parse probe event message failed", "message", event.Message) - return "", nil - } - - // if probe event operation is not implemented, check role failed or invalid, ignore it - if message.Event == ProbeEventOperationNotImpl || message.Event == ProbeEventCheckRoleFailed || message.Event == ProbeEventRoleInvalid { - reqCtx.Log.Info("probe event failed", "message", message.Message) - return "", nil - } - role := strings.ToLower(message.Role) - - podName := types.NamespacedName{ - Namespace: event.InvolvedObject.Namespace, - Name: event.InvolvedObject.Name, - } - // get pod - pod := &corev1.Pod{} - if err := cli.Get(reqCtx.Ctx, podName, pod); err != nil { - return role, err - } - // event belongs to old pod with the same name, ignore it - if pod.UID != event.InvolvedObject.UID { - return role, nil - } - - // compare the EventTime of the current event object with the lastTimestamp of the last recorded in the pod annotation, - // if the current event's EventTime is earlier than the recorded lastTimestamp in the pod annotation, - // it indicates that the current event has arrived out of order and is expired, so it should not be processed. 
- lastTimestampStr, ok := pod.Annotations[constant.LastRoleSnapshotVersionAnnotationKey] - if ok { - lastTimestamp, err := time.Parse(time.RFC3339Nano, lastTimestampStr) - if err != nil { - reqCtx.Log.Info("failed to parse last role changed event timestamp from pod annotation", "pod", pod.Name, "error", err.Error()) - return role, err - } - eventLastTS := event.EventTime.Time - if !eventLastTS.After(lastTimestamp) { - reqCtx.Log.Info("event's EventTime is earlier than the recorded lastTimestamp in the pod annotation, it should not be processed.", "event uid", event.UID, "pod", pod.Name, "role", role, "originalRole", message.OriginalRole, "event EventTime", event.EventTime.Time.String(), "annotation lastTimestamp", lastTimestampStr) - return role, nil - } - } - - // get cluster obj of the pod - cluster := &appsv1alpha1.Cluster{} - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{ - Namespace: pod.Namespace, - Name: pod.Labels[constant.AppInstanceLabelKey], - }, cluster); err != nil { - return role, err - } - reqCtx.Log.V(1).Info("handle role changed event", "event uid", event.UID, "cluster", cluster.Name, "pod", pod.Name, "role", role, "originalRole", message.OriginalRole) - compName, componentDef, err := components.GetComponentInfoByPod(reqCtx.Ctx, cli, *cluster, pod) - if err != nil { - return role, err - } - switch componentDef.WorkloadType { - case appsv1alpha1.Consensus: - return role, components.UpdateConsensusSetRoleLabel(cli, reqCtx, event, componentDef, pod, role) - case appsv1alpha1.Replication: - return role, components.HandleReplicationSetRoleChangeEvent(cli, reqCtx, event, cluster, compName, pod, role) - } - return role, nil -} diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index 18c2abf550a..3815b20abf1 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -873,6 +873,48 @@ spec: required: - leader type: object + 
membersStatus: + description: members' status. + items: + properties: + podName: + default: Unknown + description: PodName pod name. + type: string + role: + properties: + accessMode: + default: ReadWrite + description: AccessMode, what service this member + capable. + enum: + - None + - Readonly + - ReadWrite + type: string + canVote: + default: true + description: CanVote, whether this member has voting + rights + type: boolean + isLeader: + default: false + description: IsLeader, whether this member is the + leader + type: boolean + name: + default: leader + description: Name, role name. + type: string + required: + - accessMode + - name + type: object + required: + - podName + - role + type: object + type: array message: additionalProperties: type: string diff --git a/controllers/apps/components/stateful_set_utils.go b/internal/common/stateful_set_utils.go similarity index 57% rename from controllers/apps/components/stateful_set_utils.go rename to internal/common/stateful_set_utils.go index 0d7f7630164..e3779cb2ce0 100644 --- a/controllers/apps/components/stateful_set_utils.go +++ b/internal/common/stateful_set_utils.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package components +package common import ( "context" @@ -26,10 +26,9 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -50,69 +49,6 @@ func IsMemberOf(set *appsv1.StatefulSet, pod *corev1.Pod) bool { return getParentName(pod) == set.Name } -// isStsAndPodsRevisionConsistent checks if StatefulSet and pods of the StatefulSet have the same revision. 
-func isStsAndPodsRevisionConsistent(ctx context.Context, cli client.Client, sts *appsv1.StatefulSet) (bool, error) { - pods, err := GetPodListByStatefulSet(ctx, cli, sts) - if err != nil { - return false, err - } - - revisionConsistent := true - if len(pods) != int(*sts.Spec.Replicas) { - return false, nil - } - - for _, pod := range pods { - if intctrlutil.GetPodRevision(&pod) != sts.Status.UpdateRevision { - revisionConsistent = false - break - } - } - return revisionConsistent, nil -} - -// getPods4Delete gets all pods for delete -func getPods4Delete(ctx context.Context, cli client.Client, sts *appsv1.StatefulSet) ([]*corev1.Pod, error) { - if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType { - return nil, nil - } - - pods, err := GetPodListByStatefulSet(ctx, cli, sts) - if err != nil { - return nil, nil - } - - podList := make([]*corev1.Pod, 0) - for i, pod := range pods { - // do nothing if the pod is terminating - if pod.DeletionTimestamp != nil { - continue - } - // do nothing if the pod has the latest version - if intctrlutil.GetPodRevision(&pod) == sts.Status.UpdateRevision { - continue - } - - podList = append(podList, &pods[i]) - } - return podList, nil -} - -// deleteStsPods deletes pods of the StatefulSet manually -func deleteStsPods(ctx context.Context, cli client.Client, sts *appsv1.StatefulSet) error { - pods, err := getPods4Delete(ctx, cli, sts) - if err != nil { - return err - } - for _, pod := range pods { - // delete the pod to trigger associate StatefulSet to re-create it - if err := cli.Delete(ctx, pod); err != nil && !apierrors.IsNotFound(err) { - return err - } - } - return nil -} - // statefulSetOfComponentIsReady checks if statefulSet of component is ready. 
func statefulSetOfComponentIsReady(sts *appsv1.StatefulSet, statefulStatusRevisionIsEquals bool, targetReplicas *int32) bool { if targetReplicas == nil { @@ -158,12 +94,13 @@ func ParseParentNameAndOrdinal(s string) (string, int32) { // GetPodListByStatefulSet gets statefulSet pod list. func GetPodListByStatefulSet(ctx context.Context, cli client.Client, stsObj *appsv1.StatefulSet) ([]corev1.Pod, error) { podList := &corev1.PodList{} + selector, err := metav1.LabelSelectorAsMap(stsObj.Spec.Selector) + if err != nil { + return nil, err + } if err := cli.List(ctx, podList, &client.ListOptions{Namespace: stsObj.Namespace}, - client.MatchingLabels{ - constant.KBAppComponentLabelKey: stsObj.Labels[constant.KBAppComponentLabelKey], - constant.AppInstanceLabelKey: stsObj.Labels[constant.AppInstanceLabelKey], - }); err != nil { + client.MatchingLabels(selector)); err != nil { return nil, err } var pods []corev1.Pod @@ -174,22 +111,3 @@ func GetPodListByStatefulSet(ctx context.Context, cli client.Client, stsObj *app } return pods, nil } - -// getPodOwnerReferencesSts gets the owner reference statefulSet of the pod. 
-func getPodOwnerReferencesSts(ctx context.Context, cli client.Client, podObj *corev1.Pod) (*appsv1.StatefulSet, error) { - stsList := &appsv1.StatefulSetList{} - if err := cli.List(ctx, stsList, - &client.ListOptions{Namespace: podObj.Namespace}, - client.MatchingLabels{ - constant.KBAppComponentLabelKey: podObj.Labels[constant.KBAppComponentLabelKey], - constant.AppInstanceLabelKey: podObj.Labels[constant.AppInstanceLabelKey], - }); err != nil { - return nil, err - } - for _, sts := range stsList.Items { - if IsMemberOf(&sts, podObj) { - return &sts, nil - } - } - return nil, nil -} diff --git a/controllers/apps/components/stateful_set_utils_test.go b/internal/common/stateful_set_utils_test.go similarity index 50% rename from controllers/apps/components/stateful_set_utils_test.go rename to internal/common/stateful_set_utils_test.go index 25b1efaccdd..3c8c0e925cf 100644 --- a/controllers/apps/components/stateful_set_utils_test.go +++ b/internal/common/stateful_set_utils_test.go @@ -17,21 +17,14 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package components +package common import ( "testing" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - apps "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" ) @@ -97,65 +90,3 @@ func TestSStatefulSetOfComponentIsReady(t *testing.T) { t.Errorf("StatefulSet should not be ready") } } - -var _ = Describe("StatefulSet utils test", func() { - var ( - clusterName = "test-replication-cluster" - stsName = "test-sts" - role = "Primary" - ) - cleanAll := func() { - By("Cleaning resources") - // delete cluster(and all dependent sub-resources), clusterversion and clusterdef - testapps.ClearClusterResources(&testCtx) - // clear rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced resources - // testapps.ClearResources(&testCtx, generics.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml, client.GracePeriodSeconds(0)) - } - - BeforeEach(cleanAll) - AfterEach(cleanAll) - - When("Updating a StatefulSet with `OnDelete` UpdateStrategy", func() { - It("will not update pods of the StatefulSet util the pods have been manually deleted", func() { - By("Creating a StatefulSet") - sts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, stsName, clusterName, testapps.DefaultRedisCompSpecName). - AddContainer(corev1.Container{Name: testapps.DefaultRedisContainerName, Image: testapps.DefaultRedisImageName}). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). - AddRoleLabel(role). - SetReplicas(1). 
- Create(&testCtx).GetObject() - - By("Creating pods by the StatefulSet") - testapps.MockReplicationComponentPods(nil, testCtx, sts, clusterName, testapps.DefaultRedisCompSpecName, nil) - Expect(isStsAndPodsRevisionConsistent(testCtx.Ctx, k8sClient, sts)).Should(BeTrue()) - - By("Updating the StatefulSet's UpdateRevision") - sts.Status.UpdateRevision = "new-mock-revision" - testk8s.PatchStatefulSetStatus(&testCtx, sts.Name, sts.Status) - podList, err := GetPodListByStatefulSet(ctx, k8sClient, sts) - Expect(err).To(Succeed()) - Expect(len(podList)).To(Equal(1)) - - By("Testing get the StatefulSet of the pod") - ownerSts, err := getPodOwnerReferencesSts(ctx, k8sClient, &podList[0]) - Expect(err).To(Succeed()) - Expect(ownerSts).ShouldNot(BeNil()) - - By("Deleting the pods of StatefulSet") - Expect(deleteStsPods(testCtx.Ctx, k8sClient, sts)).Should(Succeed()) - podList, err = GetPodListByStatefulSet(ctx, k8sClient, sts) - Expect(err).To(Succeed()) - Expect(len(podList)).To(Equal(0)) - - By("Creating new pods by StatefulSet with new UpdateRevision") - testapps.MockReplicationComponentPods(nil, testCtx, sts, clusterName, testapps.DefaultRedisCompSpecName, nil) - Expect(isStsAndPodsRevisionConsistent(testCtx.Ctx, k8sClient, sts)).Should(BeTrue()) - }) - }) -}) diff --git a/internal/controller/builder/builder_container_test.go b/internal/controller/builder/builder_container_test.go index f0316170148..2785d3c8990 100644 --- a/internal/controller/builder/builder_container_test.go +++ b/internal/controller/builder/builder_container_test.go @@ -22,10 +22,10 @@ package builder import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/intstr" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" ) var _ = Describe("container builder", func() { diff --git a/internal/controller/factory/builder.go b/internal/controller/factory/builder.go index e43ec13f932..ea6bc24fce1 100644 --- a/internal/controller/factory/builder.go +++ b/internal/controller/factory/builder.go @@ -20,7 +20,6 @@ along with this program. If not, see . package factory import ( - "embed" "encoding/base64" "encoding/hex" "encoding/json" @@ -32,14 +31,12 @@ import ( "github.com/google/uuid" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" - "github.com/leaanthony/debme" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/rand" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -61,53 +58,6 @@ const ( MountPath = "/etc/pki/tls" ) -var ( - //go:embed cue/* - cueTemplates embed.FS - cacheCtx = map[string]interface{}{} -) - -func getCacheCUETplValue(key string, valueCreator func() (*intctrlutil.CUETpl, error)) (*intctrlutil.CUETpl, error) { - vIf, ok := cacheCtx[key] - if ok { - return vIf.(*intctrlutil.CUETpl), nil - } - v, err := valueCreator() - if err != nil { - return nil, err - } - cacheCtx[key] = v - return v, err -} - -func buildFromCUE(tplName string, fillMap map[string]any, lookupKey string, target any) error { - cueFS, _ := debme.FS(cueTemplates, "cue") - cueTpl, err := getCacheCUETplValue(tplName, func() (*intctrlutil.CUETpl, error) { - return intctrlutil.NewCUETplFromBytes(cueFS.ReadFile(tplName)) - }) - if err != nil { - return err - } - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - - for k, v := range fillMap { - if 
err := cueValue.FillObj(k, v); err != nil { - return err - } - } - - b, err := cueValue.Lookup(lookupKey) - if err != nil { - return err - } - - if err = json.Unmarshal(b, target); err != nil { - return err - } - - return nil -} - func processContainersInjection(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, @@ -246,94 +196,6 @@ func BuildPersistentVolumeClaimLabels(component *component.SynthesizedComponent, } } -func BuildSvcListWithCustomAttributes(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, - customAttributeSetter func(*corev1.Service)) ([]*corev1.Service, error) { - services := BuildSvcList(cluster, component) - if customAttributeSetter != nil { - for _, svc := range services { - customAttributeSetter(svc) - } - } - return services, nil -} - -func BuildSvcList(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) []*corev1.Service { - wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) - wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName - selectors := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) - delete(selectors, constant.AppNameLabelKey) - var result = make([]*corev1.Service, 0) - for _, item := range component.Services { - if len(item.Spec.Ports) == 0 { - continue - } - name := fmt.Sprintf("%s-%s", cluster.Name, component.Name) - if len(item.Name) > 0 { - name = fmt.Sprintf("%s-%s-%s", cluster.Name, component.Name, item.Name) - } - - svcBuilder := builder.NewServiceBuilder(cluster.Namespace, name). - AddLabelsInMap(wellKnownLabels). - AddAnnotationsInMap(item.Annotations). - AddSelectorsInMap(selectors). - AddPorts(item.Spec.Ports...) 
- if len(item.Spec.Type) > 0 { - svcBuilder.SetType(item.Spec.Type) - } - svc := svcBuilder.GetObject() - result = append(result, svc) - } - return result -} - -func BuildHeadlessSvc(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent) *corev1.Service { - wellKnownLabels := buildWellKnownLabels(component.ClusterDefName, cluster.Name, component.Name) - wellKnownLabels[constant.AppComponentLabelKey] = component.CompDefName - monitorAnnotations := func() map[string]string { - annotations := make(map[string]string, 0) - falseStr := "false" - trueStr := "true" - switch { - case !component.Monitor.Enable: - annotations["monitor.kubeblocks.io/scrape"] = falseStr - annotations["monitor.kubeblocks.io/agamotto"] = falseStr - case component.Monitor.BuiltIn: - annotations["monitor.kubeblocks.io/scrape"] = falseStr - annotations["monitor.kubeblocks.io/agamotto"] = trueStr - default: - annotations["monitor.kubeblocks.io/scrape"] = trueStr - annotations["monitor.kubeblocks.io/path"] = component.Monitor.ScrapePath - annotations["monitor.kubeblocks.io/port"] = strconv.Itoa(int(component.Monitor.ScrapePort)) - annotations["monitor.kubeblocks.io/scheme"] = "http" - annotations["monitor.kubeblocks.io/agamotto"] = falseStr - } - return annotations - }() - servicePorts := func() []corev1.ServicePort { - var servicePorts []corev1.ServicePort - for _, container := range component.PodSpec.Containers { - for _, port := range container.Ports { - servicePort := corev1.ServicePort{ - Name: port.Name, - Protocol: port.Protocol, - Port: port.ContainerPort, - TargetPort: intstr.FromString(port.Name), - } - servicePorts = append(servicePorts, servicePort) - } - } - return servicePorts - }() - return builder.NewHeadlessServiceBuilder(cluster.Namespace, fmt.Sprintf("%s-%s-headless", cluster.Name, component.Name)). - AddLabelsInMap(wellKnownLabels). - AddAnnotationsInMap(monitorAnnotations). - AddSelector(constant.AppInstanceLabelKey, cluster.Name). 
- AddSelector(constant.AppManagedByLabelKey, constant.AppName). - AddSelector(constant.KBAppComponentLabelKey, component.Name). - AddPorts(servicePorts...). - GetObject() -} - func BuildSts(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, envConfigName string) (*appsv1.StatefulSet, error) { vctToPVC := func(vct corev1.PersistentVolumeClaimTemplate) corev1.PersistentVolumeClaim { @@ -890,25 +752,6 @@ func BuildPDB(cluster *appsv1alpha1.Cluster, component *component.SynthesizedCom GetObject() } -func BuildDeploy(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, envConfigName string) (*appsv1.Deployment, error) { - const tplFile = "deployment_template.cue" - deploy := appsv1.Deployment{} - if err := buildFromCUE(tplFile, map[string]any{ - "cluster": cluster, - "component": component, - }, "deployment", &deploy); err != nil { - return nil, err - } - - if component.StatelessSpec != nil { - deploy.Spec.Strategy = component.StatelessSpec.UpdateStrategy - } - if err := processContainersInjection(reqCtx, cluster, component, envConfigName, &deploy.Spec.Template.Spec); err != nil { - return nil, err - } - return &deploy, nil -} - func BuildPVC(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, vct *corev1.PersistentVolumeClaimTemplate, diff --git a/internal/controller/factory/builder_test.go b/internal/controller/factory/builder_test.go index 1521f0931d7..5a7cb4526a8 100644 --- a/internal/controller/factory/builder_test.go +++ b/internal/controller/factory/builder_test.go @@ -182,13 +182,6 @@ var _ = Describe("builder", func() { Expect(pvc.Labels[constant.VolumeTypeLabelKey]).ShouldNot(BeEmpty()) }) - It("builds Service correctly", func() { - _, cluster, synthesizedComponent := newClusterObjs(nil) - svcList, err := BuildSvcListWithCustomAttributes(cluster, synthesizedComponent, nil) - Expect(err).Should(BeNil()) - 
Expect(svcList).ShouldNot(BeEmpty()) - }) - It("builds Conn. Credential correctly", func() { var ( clusterDefObj = testapps.NewClusterDefFactoryWithConnCredential("conn-cred").GetObject() @@ -382,14 +375,6 @@ var _ = Describe("builder", func() { Expect(*rsm.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.BestEffortParallelUpdateStrategy)) }) - It("builds Deploy correctly", func() { - reqCtx := newReqCtx() - _, cluster, synthesizedComponent := newClusterObjs(nil) - deploy, err := BuildDeploy(reqCtx, cluster, synthesizedComponent, "") - Expect(err).Should(BeNil()) - Expect(deploy).ShouldNot(BeNil()) - }) - It("builds PDB correctly", func() { _, cluster, synthesizedComponent := newClusterObjs(nil) pdb := BuildPDB(cluster, synthesizedComponent) @@ -515,14 +500,6 @@ var _ = Describe("builder", func() { Expect(obj.Driver).Should(Equal(driverName)) }) - It("builds headless svc correctly", func() { - _, cluster, synthesizedComponent := newClusterObjs(nil) - expectSvcName := fmt.Sprintf("%s-%s-headless", cluster.Name, synthesizedComponent.Name) - obj := BuildHeadlessSvc(cluster, synthesizedComponent) - Expect(obj).ShouldNot(BeNil()) - Expect(obj.Name).Should(Equal(expectSvcName)) - }) - It("builds cfg manager tools correctly", func() { _, cluster, synthesizedComponent := newClusterObjs(nil) cfgManagerParams := &cfgcm.CfgManagerBuildParams{ diff --git a/internal/controller/factory/cue/deployment_template.cue b/internal/controller/factory/cue/deployment_template.cue deleted file mode 100644 index c58870d9dc4..00000000000 --- a/internal/controller/factory/cue/deployment_template.cue +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later 
version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -cluster: { - metadata: { - namespace: string - name: string - } - spec: { - clusterVersionRef: string - } -} -component: { - clusterDefName: string - name: string - compDefName: string - workloadType: string - replicas: int - podSpec: { - containers: [...] - enableServiceLinks: bool | *false - } - volumeClaimTemplates: [...] -} - -deployment: { - "apiVersion": "apps/v1" - "kind": "Deployment" - "metadata": { - namespace: cluster.metadata.namespace - name: "\(cluster.metadata.name)-\(component.name)" - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": cluster.metadata.name - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - "spec": { - replicas: component.replicas - minReadySeconds: 10 - selector: { - matchLabels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - - "apps.kubeblocks.io/component-name": "\(component.name)" - } - } - template: { - metadata: { - labels: { - "app.kubernetes.io/name": "\(component.clusterDefName)" - "app.kubernetes.io/instance": "\(cluster.metadata.name)" - "app.kubernetes.io/managed-by": "kubeblocks" - "app.kubernetes.io/component": "\(component.compDefName)" - if cluster.spec.clusterVersionRef != _|_ { - "app.kubernetes.io/version": "\(cluster.spec.clusterVersionRef)" - } - "apps.kubeblocks.io/component-name": "\(component.name)" - 
"apps.kubeblocks.io/workload-type": "\(component.workloadType)" - } - } - spec: component.podSpec - } - } -} diff --git a/internal/controller/plan/prepare_test.go b/internal/controller/plan/prepare_test.go index 6f013e73304..6cea96e52b8 100644 --- a/internal/controller/plan/prepare_test.go +++ b/internal/controller/plan/prepare_test.go @@ -33,7 +33,6 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" - "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/configuration" "github.com/apecloud/kubeblocks/internal/controller/factory" @@ -76,9 +75,6 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, resources = append(resources, workload) }() - svc := factory.BuildHeadlessSvc(cluster, component) - resources = append(resources, svc) - var podSpec *corev1.PodSpec sts, ok := workload.(*appsv1.StatefulSet) if ok { @@ -138,32 +134,10 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, panic("this shouldn't happen") } - svcList, err := factory.BuildSvcListWithCustomAttributes(cluster, component, func(svc *corev1.Service) { - switch component.WorkloadType { - case appsv1alpha1.Consensus: - addLeaderSelectorLabels(svc, component) - case appsv1alpha1.Replication: - svc.Spec.Selector[constant.RoleLabelKey] = "primary" - } - }) - if err != nil { - return nil, err - } - for _, svc := range svcList { - resources = append(resources, svc) - } - // REVIEW/TODO: // - need higher level abstraction handling // - or move this module to part operator controller handling switch component.WorkloadType { - case appsv1alpha1.Stateless: - if err := workloadProcessor( - func(envConfig *corev1.ConfigMap) (client.Object, error) { - return factory.BuildDeploy(reqCtx, cluster, component, "") - }); err != nil { - return nil, err - } case 
appsv1alpha1.Stateful, appsv1alpha1.Consensus, appsv1alpha1.Replication: if err := workloadProcessor( func(envConfig *corev1.ConfigMap) (client.Object, error) { @@ -176,14 +150,6 @@ func buildComponentResources(reqCtx intctrlutil.RequestCtx, cli client.Client, return resources, nil } -// TODO multi roles with same accessMode support -func addLeaderSelectorLabels(service *corev1.Service, component *component.SynthesizedComponent) { - leader := component.ConsensusSpec.Leader - if len(leader.Name) > 0 { - service.Spec.Selector[constant.RoleLabelKey] = leader.Name - } -} - var _ = Describe("Cluster Controller", func() { cleanEnv := func() { @@ -242,7 +208,7 @@ var _ = Describe("Cluster Controller", func() { GetObject() }) - It("should construct env, headless service, deployment and external service objects", func() { + It("should construct pdb", func() { reqCtx := intctrlutil.RequestCtx{ Ctx: ctx, Log: logger, @@ -263,10 +229,6 @@ var _ = Describe("Cluster Controller", func() { expects := []string{ "PodDisruptionBudget", - "Service", - "ConfigMap", - "Service", - "Deployment", } Expect(resources).Should(HaveLen(len(expects))) for i, v := range expects { @@ -314,9 +276,7 @@ var _ = Describe("Cluster Controller", func() { expects := []string{ "PodDisruptionBudget", - "Service", "ConfigMap", - "Service", "StatefulSet", } Expect(resources).Should(HaveLen(len(expects))) @@ -377,9 +337,7 @@ var _ = Describe("Cluster Controller", func() { expects := []string{ "PodDisruptionBudget", - "Service", "ConfigMap", - "Service", "StatefulSet", } Expect(resources).Should(HaveLen(len(expects))) @@ -439,9 +397,7 @@ var _ = Describe("Cluster Controller", func() { expects := []string{ "PodDisruptionBudget", - "Service", "ConfigMap", - "Service", "StatefulSet", } Expect(resources).Should(HaveLen(len(expects))) @@ -504,9 +460,7 @@ var _ = Describe("Cluster Controller", func() { Expect(err).Should(Succeed()) expects := []string{ "PodDisruptionBudget", - "Service", "ConfigMap", - "Service", 
"StatefulSet", } Expect(resources).Should(HaveLen(len(expects))) @@ -542,7 +496,7 @@ var _ = Describe("Cluster Controller", func() { GetObject() }) - It("should construct env, headless service, statefuset object, besides an external service object", func() { + It("should construct env, statefuset object", func() { reqCtx := intctrlutil.RequestCtx{ Ctx: ctx, Log: logger, @@ -561,14 +515,10 @@ var _ = Describe("Cluster Controller", func() { resources, err := buildComponentResources(reqCtx, testCtx.Cli, clusterDef, clusterVersion, cluster, component) Expect(err).Should(Succeed()) - // REVIEW: (free6om) - // missing connection credential, TLS secret objs check? - Expect(resources).Should(HaveLen(5)) + Expect(resources).Should(HaveLen(3)) Expect(reflect.TypeOf(resources[0]).String()).Should(ContainSubstring("PodDisruptionBudget")) - Expect(reflect.TypeOf(resources[1]).String()).Should(ContainSubstring("Service")) - Expect(reflect.TypeOf(resources[2]).String()).Should(ContainSubstring("ConfigMap")) - Expect(reflect.TypeOf(resources[3]).String()).Should(ContainSubstring("Service")) - Expect(reflect.TypeOf(resources[4]).String()).Should(ContainSubstring("StatefulSet")) + Expect(reflect.TypeOf(resources[1]).String()).Should(ContainSubstring("ConfigMap")) + Expect(reflect.TypeOf(resources[2]).String()).Should(ContainSubstring("StatefulSet")) }) }) diff --git a/internal/controller/rsm/enqueue_ancestor.go b/internal/controller/rsm/enqueue_ancestor.go deleted file mode 100644 index 0cdca0127cc..00000000000 --- a/internal/controller/rsm/enqueue_ancestor.go +++ /dev/null @@ -1,333 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package rsm - -import ( - "context" - "errors" - "fmt" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - - roclient "github.com/apecloud/kubeblocks/internal/controller/client" - "github.com/apecloud/kubeblocks/internal/controller/model" -) - -var _ handler.EventHandler = &EnqueueRequestForAncestor{} - -var log = logf.FromContext(context.Background()).WithName("eventhandler").WithName("EnqueueRequestForAncestor") - -// EnqueueRequestForAncestor enqueues Requests for the ancestor object. -// E.g. the ancestor object creates the StatefulSet/Deployment which then creates the Pod. -// -// If a ReplicatedStateMachine creates Pods, users may reconcile the ReplicatedStateMachine in response to Pod Events using: -// -// - a source.Kind Source with Type of Pod. -// -// - a EnqueueRequestForAncestor EventHandler with an OwnerType of ReplicatedStateMachine and UpToLevel set to 2. 
-// -// If source kind is corev1.Event, Event.InvolvedObject will be used as the source kind -type EnqueueRequestForAncestor struct { - // Client used to get owner object of - Client roclient.ReadonlyClient - - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object - - // find event source up to UpToLevel - UpToLevel int - - // InTypes specified the range to look for the ancestor, means all ancestors' type in the looking up tree should be in InTypes. - // OwnerType will be included. - // nil means only look for in OwnerType. - InTypes []runtime.Object - - // groupKind is the cached Group and Kind from OwnerType - groupKind *schema.GroupKind - - // ancestorGroupKinds is the cached Group and Kind from InTypes - ancestorGroupKinds []schema.GroupKind - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper -} - -type empty struct{} - -// Create implements EventHandler. -func (e *EnqueueRequestForAncestor) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} - e.getOwnerReconcileRequest(evt.Object, reqs) - for req := range reqs { - q.Add(req) - } -} - -// Update implements EventHandler. -func (e *EnqueueRequestForAncestor) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} - e.getOwnerReconcileRequest(evt.ObjectOld, reqs) - e.getOwnerReconcileRequest(evt.ObjectNew, reqs) - for req := range reqs { - q.Add(req) - } -} - -// Delete implements EventHandler. -func (e *EnqueueRequestForAncestor) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} - e.getOwnerReconcileRequest(evt.Object, reqs) - for req := range reqs { - q.Add(req) - } -} - -// Generic implements EventHandler. 
-func (e *EnqueueRequestForAncestor) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} - e.getOwnerReconcileRequest(evt.Object, reqs) - for req := range reqs { - q.Add(req) - } -} - -// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false -// if the OwnerType could not be parsed using the scheme. -func (e *EnqueueRequestForAncestor) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { - gk, err := e.parseTypeGroupKind(e.OwnerType, scheme) - if err != nil { - return err - } - // Cache the Group and Kind for the OwnerType - e.groupKind = gk - return nil -} - -// parseInTypesGroupKind parses the InTypes into a Group and Kind and caches the result. Returns false -// if the InTypes could not be parsed using the scheme. -func (e *EnqueueRequestForAncestor) parseInTypesGroupKind(scheme *runtime.Scheme) error { - if e.groupKind != nil { - e.ancestorGroupKinds = append(e.ancestorGroupKinds, *e.groupKind) - } - for _, inType := range e.InTypes { - gk, err := e.parseTypeGroupKind(inType, scheme) - if err != nil { - return err - } - // Cache the Group and Kind for the inType - e.ancestorGroupKinds = append(e.ancestorGroupKinds, *gk) - } - return nil -} - -func (e *EnqueueRequestForAncestor) parseTypeGroupKind(object runtime.Object, scheme *runtime.Scheme) (*schema.GroupKind, error) { - // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(object) - if err != nil { - log.Error(err, "Could not get ObjectKinds", "object", fmt.Sprintf("%T", object)) - return nil, err - } - // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. 
- if len(kinds) != 1 { - err := fmt.Errorf("expected exactly 1 kind for object %T, but found %s kinds", object, kinds) - log.Error(nil, "expected exactly 1 kind for object", "object", fmt.Sprintf("%T", object), "kinds", kinds) - return nil, err - } - return &schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}, nil -} - -// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile -// owners of object that match e.OwnerType. -func (e *EnqueueRequestForAncestor) getOwnerReconcileRequest(obj client.Object, result map[reconcile.Request]empty) { - // get the object by the ownerRef - object, err := e.getSourceObject(obj) - if err != nil { - return - } - - // find the root object up to UpToLevel - scheme := *model.GetScheme() - ctx := context.Background() - ref, err := e.getOwnerUpTo(ctx, object, e.UpToLevel, scheme) - if err != nil || ref == nil { - return - } - - // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType - refGV, err := schema.ParseGroupVersion(ref.APIVersion) - if err != nil { - log.Error(err, "Could not parse OwnerReference APIVersion", - "api version", ref.APIVersion) - return - } - - // Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user. - // If the two match, create a Request for the objected referred to by - // the OwnerReference. Use the Name from the OwnerReference and the Namespace from the - // object in the event. 
- if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { - // Match found - add a Request for the object referred to in the OwnerReference - request := reconcile.Request{NamespacedName: types.NamespacedName{ - Name: ref.Name, - }} - - // if owner is not namespaced then we should set the namespace to the empty - mapping, err := e.mapper.RESTMapping(*e.groupKind, refGV.Version) - if err != nil { - log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) - return - } - if mapping.Scope.Name() != meta.RESTScopeNameRoot { - request.Namespace = object.GetNamespace() - } - - result[request] = empty{} - } -} - -func (e *EnqueueRequestForAncestor) getSourceObject(object client.Object) (client.Object, error) { - eventObject, ok := object.(*corev1.Event) - // return the object directly if it's not corev1.Event kind - if !ok { - return object, nil - } - - objectRef := eventObject.InvolvedObject - scheme := *model.GetScheme() - // convert ObjectReference to OwnerReference - ownerRef := metav1.OwnerReference{ - APIVersion: objectRef.APIVersion, - Kind: objectRef.Kind, - Name: objectRef.Name, - UID: objectRef.UID, - } - - ctx := context.Background() - // get the object by the ownerRef - sourceObject, err := e.getObjectByOwnerRef(ctx, objectRef.Namespace, ownerRef, scheme) - if err != nil { - return nil, err - } - return sourceObject, nil -} - -// getOwnerUpTo gets the owner of object up to upToLevel. -// E.g. If ReplicatedStateMachine creates the StatefulSet which then creates the Pod, -// if the object is the Pod, then set upToLevel to 2 if you want to find the ReplicatedStateMachine. -// Each level of ownership should be a controller-relationship (i.e. controller=true in ownerReferences). -// nil return if no owner find in any level. 
-func (e *EnqueueRequestForAncestor) getOwnerUpTo(ctx context.Context, object client.Object, upToLevel int, scheme runtime.Scheme) (*metav1.OwnerReference, error) { - if upToLevel <= 0 { - return nil, nil - } - if object == nil { - return nil, nil - } - ownerRef := metav1.GetControllerOf(object) - if ownerRef == nil { - return nil, nil - } - if upToLevel == 1 { - return ownerRef, nil - } - objectNew, err := e.getObjectByOwnerRef(ctx, object.GetNamespace(), *ownerRef, scheme) - if err != nil { - return nil, err - } - return e.getOwnerUpTo(ctx, objectNew, upToLevel-1, scheme) -} - -func (e *EnqueueRequestForAncestor) getObjectByOwnerRef(ctx context.Context, ownerNameSpace string, ownerRef metav1.OwnerReference, scheme runtime.Scheme) (client.Object, error) { - gv, err := schema.ParseGroupVersion(ownerRef.APIVersion) - if err != nil { - return nil, err - } - gvk := schema.GroupVersionKind{ - Group: gv.Group, - Version: gv.Version, - Kind: ownerRef.Kind, - } - if !e.inAncestorRange(gvk) { - return nil, nil - } - objectRT, err := scheme.New(gvk) - if err != nil { - return nil, err - } - object, ok := objectRT.(client.Object) - if !ok { - return nil, errors.New("runtime object can't be converted to client object") - } - request := reconcile.Request{NamespacedName: types.NamespacedName{ - Name: ownerRef.Name, - }} - // if owner is not namespaced then we should set the namespace to the empty - groupKind := schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind} - mapping, err := e.mapper.RESTMapping(groupKind, gvk.Version) - if err != nil { - return nil, err - } - if mapping.Scope.Name() != meta.RESTScopeNameRoot { - request.Namespace = ownerNameSpace - } - if err := e.Client.Get(ctx, request.NamespacedName, object); err != nil { - return nil, err - } - return object, nil -} - -func (e *EnqueueRequestForAncestor) inAncestorRange(gvk schema.GroupVersionKind) bool { - for _, groupKind := range e.ancestorGroupKinds { - if gvk.Group == groupKind.Group && gvk.Kind == groupKind.Kind 
{ - return true - } - } - return false -} - -var _ inject.Scheme = &EnqueueRequestForAncestor{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForAncestor. -func (e *EnqueueRequestForAncestor) InjectScheme(s *runtime.Scheme) error { - if err := e.parseOwnerTypeGroupKind(s); err != nil { - return err - } - return e.parseInTypesGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForAncestor{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. -func (e *EnqueueRequestForAncestor) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/internal/controller/rsm/enqueue_ancestor_test.go b/internal/controller/rsm/enqueue_ancestor_test.go deleted file mode 100644 index 0f5576f1732..00000000000 --- a/internal/controller/rsm/enqueue_ancestor_test.go +++ /dev/null @@ -1,399 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package rsm - -import ( - "context" - "fmt" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/golang/mock/gomock" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/builder" - "github.com/apecloud/kubeblocks/internal/controller/model" -) - -func init() { - model.AddScheme(workloads.AddToScheme) -} - -var _ = Describe("enqueue ancestor", func() { - scheme := model.GetScheme() - var handler *EnqueueRequestForAncestor - - buildAncestorTree := func() (*workloads.ReplicatedStateMachine, *appsv1.StatefulSet, *corev1.Pod) { - ancestorL2APIVersion := "workloads.kubeblocks.io/v1alpha1" - ancestorL2Kind := "ReplicatedStateMachine" - ancestorL2Name := "ancestor-level-2" - ancestorL1APIVersion := "apps/v1" - ancestorL1Kind := "StatefulSet" - ancestorL1Name := "ancestor-level-1" - objectName := ancestorL1Name + "-0" - - ancestorLevel2 := builder.NewReplicatedStateMachineBuilder(namespace, ancestorL2Name).GetObject() - ancestorLevel2.APIVersion = ancestorL2APIVersion - ancestorLevel2.Kind = ancestorL2Kind - ancestorLevel1 := builder.NewStatefulSetBuilder(namespace, ancestorL1Name). - SetOwnerReferences(ancestorL2APIVersion, ancestorL2Kind, ancestorLevel2). - GetObject() - ancestorLevel1.APIVersion = ancestorL1APIVersion - ancestorLevel1.Kind = ancestorL1Kind - object := builder.NewPodBuilder(namespace, objectName). - SetOwnerReferences(ancestorL1APIVersion, ancestorL1Kind, ancestorLevel1). 
- GetObject() - - return ancestorLevel2, ancestorLevel1, object - } - - BeforeEach(func() { - handler = &EnqueueRequestForAncestor{ - Client: k8sMock, - OwnerType: &workloads.ReplicatedStateMachine{}, - UpToLevel: 2, - InTypes: []runtime.Object{&appsv1.StatefulSet{}}, - } - }) - - Context("parseOwnerTypeGroupKind", func() { - It("should work well", func() { - Expect(handler.parseOwnerTypeGroupKind(scheme)).Should(Succeed()) - Expect(handler.groupKind.Group).Should(Equal("workloads.kubeblocks.io")) - Expect(handler.groupKind.Kind).Should(Equal("ReplicatedStateMachine")) - }) - }) - - Context("parseInTypesGroupKind", func() { - It("should work well", func() { - Expect(handler.parseInTypesGroupKind(scheme)).Should(Succeed()) - Expect(handler.ancestorGroupKinds).Should(HaveLen(1)) - Expect(handler.ancestorGroupKinds[0].Group).Should(Equal("apps")) - Expect(handler.ancestorGroupKinds[0].Kind).Should(Equal("StatefulSet")) - }) - }) - - Context("getObjectByOwnerRef", func() { - BeforeEach(func() { - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) - }) - - It("should return err if groupVersion parsing error", func() { - wrongAPIVersion := "wrong/group/version" - ownerRef := metav1.OwnerReference{ - APIVersion: wrongAPIVersion, - } - _, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) - Expect(err).ShouldNot(BeNil()) - Expect(err.Error()).Should(ContainSubstring(wrongAPIVersion)) - }) - - It("should return nil if ancestor's type out of range", func() { - ownerRef := metav1.OwnerReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "foo", - UID: "bar", - } - object, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) - Expect(err).Should(BeNil()) - Expect(object).Should(BeNil()) - }) - - It("should return the owner object", func() { - ownerName := "foo" - ownerUID := types.UID("bar") - ownerRef := metav1.OwnerReference{ - APIVersion: "apps/v1", - Kind: 
"StatefulSet", - Name: ownerName, - UID: ownerUID, - } - k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). - DoAndReturn(func(_ context.Context, _ client.ObjectKey, obj *appsv1.StatefulSet, _ ...client.ListOption) error { - obj.Name = ownerName - obj.UID = ownerUID - return nil - }).Times(1) - object, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) - Expect(err).Should(BeNil()) - Expect(object).ShouldNot(BeNil()) - Expect(object.GetName()).Should(Equal(ownerName)) - Expect(object.GetUID()).Should(Equal(ownerUID)) - }) - }) - - Context("getOwnerUpTo", func() { - BeforeEach(func() { - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) - }) - - It("should work well", func() { - By("set upToLevel to 0") - ownerRef, err := handler.getOwnerUpTo(ctx, nil, 0, *scheme) - Expect(err).Should(BeNil()) - Expect(ownerRef).Should(BeNil()) - - By("set object to nil") - ownerRef, err = handler.getOwnerUpTo(ctx, nil, handler.UpToLevel, *scheme) - Expect(err).Should(BeNil()) - Expect(ownerRef).Should(BeNil()) - - By("builder ancestor tree") - ancestorLevel2, ancestorLevel1, object := buildAncestorTree() - - By("set upToLevel to 1") - ownerRef, err = handler.getOwnerUpTo(ctx, object, 1, *scheme) - Expect(err).Should(BeNil()) - Expect(ownerRef).ShouldNot(BeNil()) - Expect(ownerRef.APIVersion).Should(Equal(ancestorLevel1.APIVersion)) - Expect(ownerRef.Kind).Should(Equal(ancestorLevel1.Kind)) - Expect(ownerRef.Name).Should(Equal(ancestorLevel1.Name)) - Expect(ownerRef.UID).Should(Equal(ancestorLevel1.UID)) - - By("set upToLevel to 2") - k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). 
- DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { - sts.Namespace = objKey.Namespace - sts.Name = objKey.Name - sts.OwnerReferences = ancestorLevel1.OwnerReferences - return nil - }).Times(1) - ownerRef, err = handler.getOwnerUpTo(ctx, object, handler.UpToLevel, *scheme) - Expect(err).Should(BeNil()) - Expect(ownerRef).ShouldNot(BeNil()) - Expect(ownerRef.APIVersion).Should(Equal(ancestorLevel2.APIVersion)) - Expect(ownerRef.Kind).Should(Equal(ancestorLevel2.Kind)) - Expect(ownerRef.Name).Should(Equal(ancestorLevel2.Name)) - Expect(ownerRef.UID).Should(Equal(ancestorLevel2.UID)) - }) - }) - - Context("getSourceObject", func() { - BeforeEach(func() { - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) - }) - - It("should work well", func() { - By("build a non-event object") - name := "foo" - uid := types.UID("bar") - object1 := builder.NewPodBuilder(namespace, name).SetUID(uid).GetObject() - objectSrc1, err := handler.getSourceObject(object1) - Expect(err).Should(BeNil()) - Expect(objectSrc1).Should(Equal(object1)) - - By("build an event object") - handler.InTypes = append(handler.InTypes, &corev1.Pod{}) - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - objectRef := corev1.ObjectReference{ - APIVersion: "v1", - Kind: "Pod", - Namespace: namespace, - Name: object1.Name, - UID: object1.UID, - } - object2 := builder.NewEventBuilder(namespace, "foo"). - SetInvolvedObject(objectRef). - GetObject() - k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &corev1.Pod{}, gomock.Any()). 
- DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *corev1.Pod, _ ...client.ListOptions) error { - obj.Name = objKey.Name - obj.Namespace = objKey.Namespace - obj.UID = objectRef.UID - return nil - }).Times(1) - objectSrc2, err := handler.getSourceObject(object2) - Expect(err).Should(BeNil()) - Expect(objectSrc2).ShouldNot(BeNil()) - Expect(objectSrc2.GetName()).Should(Equal(object1.Name)) - Expect(objectSrc2.GetNamespace()).Should(Equal(object1.Namespace)) - Expect(objectSrc2.GetUID()).Should(Equal(object1.UID)) - }) - }) - - Context("getOwnerReconcileRequest", func() { - BeforeEach(func() { - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) - }) - - It("should work well", func() { - By("build ancestor tree") - ancestorLevel2, ancestorLevel1, object := buildAncestorTree() - - k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). - DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { - sts.Namespace = objKey.Namespace - sts.Name = objKey.Name - sts.OwnerReferences = ancestorLevel1.OwnerReferences - return nil - }).Times(1) - - By("get object with ancestors") - result := make(map[reconcile.Request]empty) - handler.getOwnerReconcileRequest(object, result) - Expect(result).Should(HaveLen(1)) - for request := range result { - Expect(request.Namespace).Should(Equal(ancestorLevel2.Namespace)) - Expect(request.Name).Should(Equal(ancestorLevel2.Name)) - } - - By("set obj not exist") - wrongAPIVersion := "wrong/api/version" - object.OwnerReferences[0].APIVersion = wrongAPIVersion - result = make(map[reconcile.Request]empty) - handler.getOwnerReconcileRequest(object, result) - Expect(result).Should(HaveLen(0)) - - By("set level 1 ancestor's owner not exist") - object.OwnerReferences[0].APIVersion = ancestorLevel1.APIVersion - k8sMock.EXPECT(). 
- Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). - DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { - sts.Namespace = objKey.Namespace - sts.Name = objKey.Name - return nil - }).Times(1) - result = make(map[reconcile.Request]empty) - handler.getOwnerReconcileRequest(object, result) - Expect(result).Should(HaveLen(0)) - }) - }) - - Context("handler interface", func() { - BeforeEach(func() { - Expect(handler.InjectScheme(scheme)).Should(Succeed()) - Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) - }) - - It("should work well", func() { - By("build events and queue") - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "enqueue-ancestor-test") - ancestorLevel2, ancestorLevel1, object := buildAncestorTree() - createEvent := event.CreateEvent{Object: object} - updateEvent := event.UpdateEvent{ObjectOld: object, ObjectNew: object} - deleteEvent := event.DeleteEvent{Object: object} - genericEvent := event.GenericEvent{Object: object} - - cases := []struct { - name string - testFunc func() - getTimes int - }{ - { - name: "Create", - testFunc: func() { handler.Create(createEvent, queue) }, - getTimes: 1, - }, - { - name: "Update", - testFunc: func() { handler.Update(updateEvent, queue) }, - getTimes: 2, - }, - { - name: "Delete", - testFunc: func() { handler.Delete(deleteEvent, queue) }, - getTimes: 1, - }, - { - name: "Generic", - testFunc: func() { handler.Generic(genericEvent, queue) }, - getTimes: 1, - }, - } - for _, c := range cases { - By(fmt.Sprintf("test %s interface", c.name)) - k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). 
- DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { - sts.Namespace = objKey.Namespace - sts.Name = objKey.Name - sts.OwnerReferences = ancestorLevel1.OwnerReferences - return nil - }).Times(c.getTimes) - c.testFunc() - item, shutdown := queue.Get() - Expect(shutdown).Should(BeFalse()) - request, ok := item.(reconcile.Request) - Expect(ok).Should(BeTrue()) - Expect(request.Namespace).Should(Equal(ancestorLevel2.Namespace)) - Expect(request.Name).Should(Equal(ancestorLevel2.Name)) - queue.Done(item) - queue.Forget(item) - } - - queue.ShutDown() - }) - }) -}) - -type fakeMapper struct{} - -func (f *fakeMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - return schema.GroupVersionKind{}, nil -} - -func (f *fakeMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - return nil, nil -} - -func (f *fakeMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - return schema.GroupVersionResource{}, nil -} - -func (f *fakeMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - return nil, nil -} - -func (f *fakeMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - return &meta.RESTMapping{Scope: meta.RESTScopeNamespace}, nil -} - -func (f *fakeMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - return nil, nil -} - -func (f *fakeMapper) ResourceSingularizer(resource string) (singular string, err error) { - return "", nil -} - -func newFakeMapper() meta.RESTMapper { - return &fakeMapper{} -} diff --git a/internal/controller/rsm/pod_role_event_handler.go b/internal/controller/rsm/pod_role_event_handler.go index 63c3f0bd440..36ce5ae973a 100644 --- a/internal/controller/rsm/pod_role_event_handler.go +++ b/internal/controller/rsm/pod_role_event_handler.go @@ -37,9 +37,6 @@ 
import ( intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) -// TODO(free6om): dedup copied funcs from event_controllers.go -// TODO(free6om): refactor event_controller.go as it should NOT import controllers/apps/component/* - type PodRoleEventHandler struct{} // probeEventType defines the type of probe event. diff --git a/internal/controller/rsm/update_plan.go b/internal/controller/rsm/update_plan.go index 16c2ee4abc5..0a6b712411d 100644 --- a/internal/controller/rsm/update_plan.go +++ b/internal/controller/rsm/update_plan.go @@ -94,8 +94,8 @@ func (p *realUpdatePlan) build() { return } - rolePriorityMap := composeRolePriorityMap(p.rsm) - sortPods(p.pods, rolePriorityMap, false) + rolePriorityMap := ComposeRolePriorityMap(p.rsm.Spec.Roles) + SortPods(p.pods, rolePriorityMap, false) // generate plan by MemberUpdateStrategy switch *p.rsm.Spec.MemberUpdateStrategy { diff --git a/internal/controller/rsm/utils.go b/internal/controller/rsm/utils.go index 7b54b83a964..9fbb00d68cf 100644 --- a/internal/controller/rsm/utils.go +++ b/internal/controller/rsm/utils.go @@ -63,10 +63,10 @@ const ( var podNameRegex = regexp.MustCompile(`(.*)-([0-9]+)$`) -// sortPods sorts pods by their role priority +// SortPods sorts pods by their role priority // e.g.: unknown -> empty -> learner -> follower1 -> follower2 -> leader, with follower1.Name < follower2.Name // reverse it if reverse==true -func sortPods(pods []corev1.Pod, rolePriorityMap map[string]int, reverse bool) { +func SortPods(pods []corev1.Pod, rolePriorityMap map[string]int, reverse bool) { getRoleFunc := func(i int) string { return getRoleName(pods[i]) } @@ -88,11 +88,12 @@ func sortMembersStatus(membersStatus []workloads.MemberStatus, rolePriorityMap m sortMembers(membersStatus, rolePriorityMap, getRoleFunc, getOrdinalFunc, true) } -func sortMembers[T any](membersStatus []T, +// sortMembers sorts items by role priority and pod ordinal. 
+func sortMembers[T any](items []T, rolePriorityMap map[string]int, getRoleFunc getRole, getOrdinalFunc getOrdinal, reverse bool) { - sort.SliceStable(membersStatus, func(i, j int) bool { + sort.SliceStable(items, func(i, j int) bool { if reverse { i, j = j, i } @@ -107,11 +108,11 @@ func sortMembers[T any](membersStatus []T, }) } -// composeRolePriorityMap generates a priority map based on roles. -func composeRolePriorityMap(rsm workloads.ReplicatedStateMachine) map[string]int { +// ComposeRolePriorityMap generates a priority map based on roles. +func ComposeRolePriorityMap(roles []workloads.ReplicaRole) map[string]int { rolePriorityMap := make(map[string]int, 0) rolePriorityMap[""] = emptyPriority - for _, role := range rsm.Spec.Roles { + for _, role := range roles { roleName := strings.ToLower(role.Name) switch { case role.IsLeader: @@ -189,11 +190,12 @@ func setMembersStatus(rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) } // sort and set - rolePriorityMap := composeRolePriorityMap(*rsm) + rolePriorityMap := ComposeRolePriorityMap(rsm.Spec.Roles) sortMembersStatus(newMembersStatus, rolePriorityMap) rsm.Status.MembersStatus = newMembersStatus } +// getRoleName gets role name of pod 'pod' func getRoleName(pod corev1.Pod) string { return strings.ToLower(pod.Labels[constant.RoleLabelKey]) } diff --git a/internal/controller/rsm/utils_test.go b/internal/controller/rsm/utils_test.go index 00a8959f523..814e73b960d 100644 --- a/internal/controller/rsm/utils_test.go +++ b/internal/controller/rsm/utils_test.go @@ -44,10 +44,10 @@ var _ = Describe("utils test", func() { SetService(&corev1.Service{}). SetRoles(roles). 
GetObject() - priorityMap = composeRolePriorityMap(*rsm) + priorityMap = ComposeRolePriorityMap(rsm.Spec.Roles) }) - Context("composeRolePriorityMap function", func() { + Context("ComposeRolePriorityMap function", func() { It("should work well", func() { priorityList := []int{ leaderPriority, @@ -63,7 +63,7 @@ var _ = Describe("utils test", func() { }) }) - Context("sortPods function", func() { + Context("SortPods function", func() { It("should work well", func() { pods := []corev1.Pod{ *builder.NewPodBuilder(namespace, "pod-0").AddLabels(roleLabelKey, "follower").GetObject(), @@ -76,7 +76,7 @@ var _ = Describe("utils test", func() { } expectedOrder := []string{"pod-4", "pod-2", "pod-3", "pod-6", "pod-1", "pod-0", "pod-5"} - sortPods(pods, priorityMap, false) + SortPods(pods, priorityMap, false) for i, pod := range pods { Expect(pod.Name).Should(Equal(expectedOrder[i])) } diff --git a/internal/controllerutil/pod_utils.go b/internal/controllerutil/pod_utils.go index 842db70d8bd..901b84c97a2 100644 --- a/internal/controllerutil/pod_utils.go +++ b/internal/controllerutil/pod_utils.go @@ -356,6 +356,21 @@ func GetProbeHTTPPort(pod *corev1.Pod) (int32, error) { return GetPortByPortName(pod, constant.ProbeHTTPPortName) } +// GuessLorryHTTPPort guesses lorry container and serving port. +// TODO(xuriwuyun): should provide a deterministic way to find the lorry serving port. 
+func GuessLorryHTTPPort(pod *corev1.Pod) (int32, error) { + lorryImage := viper.GetString(constant.KBToolsImage) + for _, container := range pod.Spec.Containers { + if container.Image != lorryImage { + continue + } + if len(container.Ports) > 0 { + return container.Ports[0].ContainerPort, nil + } + } + return 0, fmt.Errorf("lorry port not found") +} + // GetProbeContainerName gets the probe container from pod func GetProbeContainerName(pod *corev1.Pod) (string, error) { lorryImage := viper.GetString(constant.KBToolsImage) diff --git a/internal/generics/type.go b/internal/generics/type.go index a5c7bd4627d..b10ce1a2fd2 100644 --- a/internal/generics/type.go +++ b/internal/generics/type.go @@ -68,7 +68,7 @@ var ConfigMapSignature = func(_ corev1.ConfigMap, _ corev1.ConfigMapList) {} var EndpointsSignature = func(_ corev1.Endpoints, _ corev1.EndpointsList) {} var RSMSignature = func(_ workloads.ReplicatedStateMachine, _ workloads.ReplicatedStateMachineList) {} -var StatefulSetSignature = func(_ appsv1.StatefulSet, _ appsv1.StatefulSetList) {} +var StatefulSetSignature = func(A appsv1.StatefulSet, B appsv1.StatefulSetList) {} var DeploymentSignature = func(_ appsv1.Deployment, _ appsv1.DeploymentList) {} var ReplicaSetSignature = func(_ appsv1.ReplicaSet, _ appsv1.ReplicaSetList) {} diff --git a/lorry/client/client.go b/lorry/client/client.go index 07aa4de0d41..a4b22558c74 100644 --- a/lorry/client/client.go +++ b/lorry/client/client.go @@ -100,7 +100,7 @@ func NewClientWithPod(pod *corev1.Pod, characterType string) (*OperationClient, return nil, fmt.Errorf("pod %v has no ip", pod.Name) } - port, err := intctrlutil.GetProbeHTTPPort(pod) + port, err := intctrlutil.GuessLorryHTTPPort(pod) if err != nil { // not lorry in the pod, just return nil without error return nil, nil diff --git a/test/integration/controller_suite_test.go b/test/integration/controller_suite_test.go index 3ca98b67867..1afd079d2f0 100644 --- a/test/integration/controller_suite_test.go +++ 
b/test/integration/controller_suite_test.go @@ -45,10 +45,10 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps" - "github.com/apecloud/kubeblocks/controllers/apps/components" dpctrl "github.com/apecloud/kubeblocks/controllers/dataprotection" "github.com/apecloud/kubeblocks/controllers/k8score" cliutil "github.com/apecloud/kubeblocks/internal/cli/util" + "github.com/apecloud/kubeblocks/internal/common" cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" @@ -103,7 +103,7 @@ func GetConsensusRoleCountMap(testCtx testutil.TestContext, k8sClient client.Cli } sts := stsList.Items[0] - pods, err := components.GetPodListByStatefulSet(testCtx.Ctx, k8sClient, &sts) + pods, err := common.GetPodListByStatefulSet(testCtx.Ctx, k8sClient, &sts) if err != nil { return roleCountMap diff --git a/test/integration/mysql_ha_test.go b/test/integration/mysql_ha_test.go index 840d93d6673..75ad99c3ae5 100644 --- a/test/integration/mysql_ha_test.go +++ b/test/integration/mysql_ha_test.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/controllers/apps/components" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" @@ -121,7 +121,7 @@ var _ = Describe("MySQL High-Availability function", func() { By("Checking pods' role label") stsList := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey) sts := &stsList.Items[0] - pods, err := components.GetPodListByStatefulSet(ctx, k8sClient, sts) + pods, err := 
common.GetPodListByStatefulSet(ctx, k8sClient, sts) Expect(err).To(Succeed()) // should have 3 pods Expect(len(pods)).Should(Equal(3)) diff --git a/test/integration/mysql_reconfigure_test.go b/test/integration/mysql_reconfigure_test.go index fb92347f641..0508fe4e1ea 100644 --- a/test/integration/mysql_reconfigure_test.go +++ b/test/integration/mysql_reconfigure_test.go @@ -27,9 +27,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/controllers/apps/components" clitypes "github.com/apecloud/kubeblocks/internal/cli/types" cliutil "github.com/apecloud/kubeblocks/internal/cli/util" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" @@ -152,7 +152,7 @@ var _ = Describe("MySQL Reconfigure function", func() { By("Checking pods' role label") sts := testk8s.ListAndCheckStatefulSet(&testCtx, clusterKey).Items[0] - pods, err := components.GetPodListByStatefulSet(testCtx.Ctx, k8sClient, &sts) + pods, err := common.GetPodListByStatefulSet(testCtx.Ctx, k8sClient, &sts) Expect(err).To(Succeed()) Expect(len(pods)).Should(Equal(3)) diff --git a/test/integration/redis_hscale_test.go b/test/integration/redis_hscale_test.go index 453d6002b49..4c5ddc4f9da 100644 --- a/test/integration/redis_hscale_test.go +++ b/test/integration/redis_hscale_test.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/controllers/apps/components" + "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" @@ -106,7 +106,7 @@ var _ = 
Describe("Redis Horizontal Scale function", func() { Expect(len(stsList.Items)).Should(BeEquivalentTo(1)) By("Checking pods number and role label in StatefulSet") - podList, err := components.GetPodListByStatefulSet(ctx, k8sClient, &stsList.Items[0]) + podList, err := common.GetPodListByStatefulSet(ctx, k8sClient, &stsList.Items[0]) Expect(err).To(Succeed()) Expect(len(podList)).Should(BeEquivalentTo(replicas)) for _, pod := range podList { From 9610ba641e510f97727b55066b17d5d41a76f250 Mon Sep 17 00:00:00 2001 From: yuanyuan zhang <111744220+michelle-0808@users.noreply.github.com> Date: Thu, 21 Sep 2023 18:26:24 +0800 Subject: [PATCH 12/58] docs: add new addon docs and update config docs (#5161) --- .../environment-variables-and-placeholders.md | 57 +++ .../integration/how-to-add-an-add-on.md | 12 +- docs/developer_docs/integration/monitoring.md | 267 +++++++++++++ .../integration/multi-component.md | 254 +++++++++++++ .../integration/parameter-configuration.md | 353 ++++++++++++++++++ .../integration/parameter-template.md | 230 ++++++++++++ docs/img/addon-confirm-config-changes.png | Bin 0 -> 136785 bytes docs/img/addon-interactive-config-editor.png | Bin 0 -> 297418 bytes docs/img/addon-monitoring-signin.png | Bin 0 -> 96894 bytes docs/img/nebula-aichitecture.png | Bin 0 -> 59423 bytes docs/img/nebula-inter-component-ref.png | Bin 0 -> 48904 bytes .../backup-and-restore/backup/backup-repo.md | 2 +- .../configuration/configuration.md | 43 ++- .../configuration/configuration.md | 41 +- .../feature-and-limit-list-mongodb.md | 2 +- .../configuration/configuration.md | 50 ++- .../configuration/configuration.md | 49 ++- .../configuration/configuration.md | 39 ++ .../configuration/configuration.md | 45 ++- .../manage-vector-databases.md | 16 +- 20 files changed, 1436 insertions(+), 24 deletions(-) create mode 100644 docs/developer_docs/integration/environment-variables-and-placeholders.md create mode 100644 docs/developer_docs/integration/monitoring.md create mode 
100644 docs/developer_docs/integration/multi-component.md create mode 100644 docs/developer_docs/integration/parameter-configuration.md create mode 100644 docs/developer_docs/integration/parameter-template.md create mode 100644 docs/img/addon-confirm-config-changes.png create mode 100644 docs/img/addon-interactive-config-editor.png create mode 100644 docs/img/addon-monitoring-signin.png create mode 100644 docs/img/nebula-aichitecture.png create mode 100644 docs/img/nebula-inter-component-ref.png diff --git a/docs/developer_docs/integration/environment-variables-and-placeholders.md b/docs/developer_docs/integration/environment-variables-and-placeholders.md new file mode 100644 index 00000000000..b4e6a591174 --- /dev/null +++ b/docs/developer_docs/integration/environment-variables-and-placeholders.md @@ -0,0 +1,57 @@ +--- +title: Environment variables and placeholders +description: KubeBlocks Environment Variables and Placeholders +keywords: [environment variables, placeholders] +sidebar_position: 10 +sidebar_label: Environment variables and placeholders +--- + +# Environment variables and placeholders + +## Environment variables + +### Automatic pod's container environment variables + +The following variables are injected by KubeBlocks into each pod. 
+ +| Name | Description | +| :--- | :---------- | +| KB_POD_NAME | K8s Pod Name | +| KB_NAMESPACE | K8s Pod Namespace | +| KB_SA_NAME | KubeBlocks Service Account Name | +| KB_NODENAME | K8s Node Name | +| KB_HOSTIP | K8s Host IP address | +| KB_PODIP | K8s Pod IP address | +| KB_PODIPS | K8s Pod IP addresses | +| KB_POD_UID | POD UID (`pod.metadata.uid`) | +| KB_CLUSTER_NAME | KubeBlocks Cluster API object name | +| KB_COMP_NAME | Running pod's KubeBlocks Cluster API object's `.spec.components.name` | +| KB_CLUSTER_COMP_NAME | Running pod's KubeBlocks Cluster API object's `<.metadata.name>-<.spec.components.name>` | +| KB_REPLICA_COUNT | Running pod's component's replica | +| KB_CLUSTER_UID | Running pods' KubeBlocks Cluster API object's `metadata.uid` | +| KB_CLUSTER_UID_POSTFIX_8 | Last eight digits of KB_CLUSTER_UID | +| KB_{ordinal}_HOSTNAME | Running pod's hostname, where `{ordinal}` is the ordinal of pod.
N/A if workloadType=Stateless. | +| KB_POD_FQDN | Running pod's fully qualified domain name (FQDN).
N/A if workloadType=Stateless. | + +## Built-in Place-holders + +### ComponentValueFrom API + +| Name | Description | +| :--- | :---------- | +| POD_ORDINAL | Pod ordinal | +| POD_FQDN | Pod FQDN (fully qualified domain name) | +| POD_NAME | Pod Name | + +### ConnectionCredential API + +| Name | Description | +| :--- | :---------- | +| UUID | Generate a random UUID v4 string. | +| UUID_B64 | Generate a random UUID v4 BASE64 encoded string. | +| UUID_STR_B64 | Generate a random UUID v4 string then BASE64 encoded. | +| UUID_HEX | Generate a random UUID v4 HEX representation. | +| HEADLESS_SVC_FQDN | Headless service FQDN placeholder, value pattern - `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; | +| SVC_FQDN | Service FQDN placeholder, value pattern - `$(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc`, where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; | +| SVC_PORT_{PORT_NAME} | A ServicePort's port value with specified port name, i.e, a servicePort JSON struct:
`{"name": "mysql", "targetPort": "mysqlContainerPort", "port": 3306}`, and "$(SVC_PORT_mysql)" in the connection credential value is 3306. | +| RANDOM_PASSWD | Random 8 characters | diff --git a/docs/developer_docs/integration/how-to-add-an-add-on.md b/docs/developer_docs/integration/how-to-add-an-add-on.md index 37eb917bc40..dbc1842a8ec 100644 --- a/docs/developer_docs/integration/how-to-add-an-add-on.md +++ b/docs/developer_docs/integration/how-to-add-an-add-on.md @@ -16,7 +16,7 @@ There are altogether 3 steps to integrate an add-on: 2. Prepare cluster templates. 3. Add an `addon.yaml` file. -## Step 1. Design a blueprint for cluster. +## Step 1. Design a blueprint for cluster Before getting started, make sure to design your cluster blueprint. Think about what you want your cluster to look like. For example: @@ -37,9 +37,9 @@ Cluster Format: Deploying a MySQL 8.0 Standalone. | ClusterVersion | Image: docker.io/mysql:8.0.34 | | Cluster.yaml | Specified by the user during creation | -## Step 2. Prepare cluster templates. +## Step 2. Prepare cluster templates -### 2.1 Create a Helm chart. +### 2.1 Create a Helm chart Opt 1.`helm create oracle-mysql` @@ -226,7 +226,7 @@ Now you've finished with ClusterDefinition and ClusterVersion, try to do a quick ::: -### 2.2 Install Helm chart. +### 2.2 Install Helm chart Install Helm. @@ -245,7 +245,7 @@ REVISION: 1 TEST SUITE: None ``` -### 2.3 Create a cluster. +### 2.3 Create a cluster Create a MySQL cluster with `kbcli cluster create`. @@ -313,7 +313,7 @@ After the creating, you can: kbcli cluster stop mycluster ``` -## Step 3. Add an addon.yaml file. +## Step 3. Add an addon.yaml file This is the last step to integrate an add-on to KubeBlocks. After creating this addon.yaml file, this add-on is in the KubeBlocks add-on family. Please refer to `tutorial-1-create-an-addon/oracle-mysql-addon.yaml`. 
diff --git a/docs/developer_docs/integration/monitoring.md b/docs/developer_docs/integration/monitoring.md new file mode 100644 index 00000000000..464629df653 --- /dev/null +++ b/docs/developer_docs/integration/monitoring.md @@ -0,0 +1,267 @@ +--- +title: Monitoring +description: How to configure monitoring function in KubeBlocks +keywords: [monitoring] +sidebar_position: 6 +sidebar_label: Monitoring +--- + +# Configure monitoring + +This tutorial takes Oracle MySQL as an example and explains how to configure monitoring in KubeBlocks. You can refer to [the full PR](https://github.com/apecloud/learn-kubeblocks-addon/tree/main/tutorial-4-monitor-cluster/oracle-mysql). + +## Before you start + +1. Knowledge about basic Kubernetes concepts, such as Pod and Sidecar. +2. Finish [Tutorial 1](./how-to-add-an-add-on.md). +3. Knowledge about basic monitoring system concepts, such as Prometheus and Grafana. + +## Introduction + +Monitoring is an essential part of Kubernetes observability. It helps developers check the system's operational status to quickly identify issues. + +Kubeblocks currently integrates Prometheus and Grafana as add-ons. In this tutorial, you will learn how to integrate the Prometheus/Grafana solution. + +### Prometheus Overview + +Prometheus provides an open-source monitoring solution that integrates metric collection, metric storage, and alert capabilities. + +It is widely used in cloud-native, containerized, and microservices architectures. With Prometheus, developers and operations teams can monitor the performance and health status of applications in real-time, so as to quickly identify and resolve issues to ensure application reliability and availability. Prometheus is usually used with Grafana to create powerful monitoring and observability solutions. + +### Grafana Overview + +Grafana is an open-source analytics and monitoring platform widely used for visualizing time series data. 
It allows users to create interactive and customizable dashboards to monitor and analyze data from various sources. + +:paperclip: Table 1. Terminology + +| Term | Description | +| :-- | :---------- | +| Prometheus Exporter | Prometheus Exporter is a component that collects monitoring data and provides data to external entities using the Prometheus monitoring specification.
For more details, refer to [Prometheus Exporter List](https://prometheus.io/docs/instrumenting/exporters/). | +| Prometheus Metrics | Prometheus Metrics are data points used for monitoring and performance analysis. They typically include request counts, response times, CPU usage, and memory usage. | +| Grafana Dashboard | Grafana Dashboard is a visualization interface used to present data. It is commonly used for monitoring and analyzing various time series data. | + +Prometheus in KubeBlocks has already been configured with scraping jobs, so developers only need to configure the Exporter. In KubeBlocks, the Exporter is deployed as a sidecar alongside the main container of the database engine in the same Pod. + +## Configure Exporter + +First, choose an Exporter. This tutorial is based on Oracle MySQL, so a MySQL Exporter is needed. + +### Configure Exporter version + +Modify ClusterVersion (`clusterversion.yaml`). + +You can find an appropriate Exporter from open-source communities(e.g., [Prometheus in Docker](https://hub.docker.com/u/prom)). + +```yaml +componentVersions: +- componentDefRef: mysql-compdef + versionsContext: + containers: + - name: mysql-container + image: ... + imagePullPolicy: .. + - name: mysql-exporter + image: prom/mysqld-exporter:v0.14.0 +``` + +Specify the image of mysql-exporter as prom/mysqld-exporter with the version 0.14.0. + +### Add an Exporter container + +Modify `clusterdefinition.yaml` and configure mysql-exporter in Sidecar form. 
+ +```yaml +podSpec: + containers: + # mysql container and other containers -> + - name: mysql-exporter + ports: + - name: metrics + containerPort: 9104 + protocol: TCP + env: + - name: "MYSQL_MONITOR_USER" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + - name: "MYSQL_MONITOR_PASSWORD" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + - name: "DATA_SOURCE_NAME" + value: "$(MYSQL_MONITOR_USER):$(MYSQL_MONITOR_PASSWORD)@(localhost:3306)/" +``` + +As shown from Line 4 to 21, a new container has been added to the original PodSpec. `DATA_SOURCE_NAME` is an environment variable specific to the mysql-exporter. + +:::caution + +Different Exporters require different environment variables, and they need to be configured based on specific features of each Exporter. + +As mentioned, this tutorial uses mysql exporter 0.14.0. In the latest mysql exporter version 0.15.0, the variable `DATA_SOURCE_NAME` is no longer supported. + +::: + +### Configure monitor parameters + +Modify `clusterdefinition.yaml` and configure `monitor` parameters. + +```yaml +componentDefs: + - name: mysql-compdef + characterType: mysql + service: .. + monitor: + exporterConfig: + scrapePort: 9104 # Listening port of the Exporter, used by Prometheus to pull data + scrapePath: "/metrics" # Path of the Exporter path, used by Prometheus to pull data +``` + +KubeBlocks supports multiple monitoring solutions. To use the open-source Prometheus/Grafana solution, configure the listening port and metrics path in `monitor`, which should correspond to the container-port specified in [2. Add an Exporter container](#2-add-an-exporter-container). + +## Configure Grafana Dashboard + +### Obtain Grafana Dashboard configurations + +Grafana Dashboard can help users monitor, analyze, and understand data in real-time. For popular databases, various dashboard configuration files (in JSON format) can be easily found. 
+ +- [Official Website of Grafana](https://grafana.com/grafana/dashboards). +- [KubeBlocks Dashboard](https://github.com/apecloud/kubeblocks-mixin). + +### Add to your cluster + +Import the downloaded JSON files on the Grafana Dashboard page or configure them in your cluster template. + +The latter option is more versatile, as the same configuration can be reused for any cluster generated through the template. + +Therefore, two files are added to the existing Helm chart. + +- dashboards: Save dashboard JSON files. +- grafana: Create a ConfigMap to store the contents of dashboard JSON files. + +```yaml +tree oracle-mysql +. +├── Chart.yaml +├── dashboards +│   └── mysql.json +├── templates +│   ├── NOTES.txt +│   ├── _helpers.tpl +│   ├── clusterdefinition.yaml +│   └── clusterversion.yaml +│   └── grafana +│   └── configmap-dashboards.yaml +└── values.yaml + +4 directories, 8 files +``` + +## Monitor cluster data + +### Prepare the environment and enable Prometheus monitoring components + +Run `kbcli addon list` to check if the following add-ons are enabled (status: Enabled): + +```bash +kbcli addon list +> +... +grafana Helm Enabled true +alertmanager-webhook-adaptor Helm Enabled true +prometheus Helm Enabled alertmanager true +... +``` + +If not (status: `Disabled`), enable them one by one. + +```bash +kbcli addon enable prometheus +kbcli addon enable alertmanager-webhook-adaptor +kbcli addon enable grafana +``` + +Then you can have access to the integrated three dashboards: + +```bash +kbcli dashboard list +> +NAME NAMESPACE PORT CREATED-TIME +kubeblocks-grafana kb-system 13000 Jul 24,2023 11:38 UTC+0800 +kubeblocks-prometheus-alertmanager kb-system 19093 Jul 24,2023 11:38 UTC+0800 +kubeblocks-prometheus-server kb-system 19090 Jul 24,2023 11:38 UTC+0800 +``` + +### Create a database cluster + +1. Install a cluster template. + + ```bash + helm install oracle-mysql ./path-to-your-helm-chart/oracle-mysql + ``` + +2. Enable monitoring function. + + Opt 1. 
Enable when creating a cluster + + ```bash + kbcli cluster create mycluster --cluster-definition='oracle-mysql' --monitor='true' + ``` + + Opt 2. Enable in an existing cluster + + ```bash + kbcli cluster update mycluster --monitor='true' + ``` + +3. Open the dashboard + + ```bash + # View available dashboards + kbcli dashboard list + > + NAME NAMESPACE PORT CREATED-TIME + kubeblocks-grafana default 3000 Jan 13,2023 10:53 UTC+0800 + kubeblocks-prometheus-alertmanager default 9093 Jan 13,2023 10:53 UTC+0800 + kubeblocks-prometheus-server default 9090 Jan 13,2023 10:53 UTC+0800 + + # Select Grafana and open the web console in the default browser + kbcli dashboard open kubeblocks-grafana + ``` + +4. Sign in to the dashboard + +![Grafana Homepage](./../../img/addon-monitoring-signin.png) + +:::note + +If the dashboard requires a login, use the following username and password. + +```bash +Username: admin +Password: kubeblocks +``` + +::: + +## Summary + +This tutorial explains how to quickly adapt the Prometheus/Grafana solution to monitor your database cluster. KubeBlocks will also introduce a monitoring solution based on OpenTelemetry in the future. Stay tuned for updates. + +## References + +1. [Prometheus](https://prometheus.io/). +2. [Grafana Dashboard](https://grafana.com/grafana/dashboards/). +3. [Create a dashboard](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/create-dashboard/). + +## Appendix + +### A.1 Disable cluster monitoring + +In KubeBlocks, you can enable or disable monitoring for a specific cluster using kbcli. 
+ +```bash +kbcli cluster update mycluster --monitor='false' +``` diff --git a/docs/developer_docs/integration/multi-component.md b/docs/developer_docs/integration/multi-component.md new file mode 100644 index 00000000000..ef0c497f584 --- /dev/null +++ b/docs/developer_docs/integration/multi-component.md @@ -0,0 +1,254 @@ +--- +title: Multi-component configuration +description: How to configure multi-component in KubeBlocks with NebulaGraph as an example +keywords: [multi-component,add-on] +sidebar_position: 7 +sidebar_label: Multi-component configuration +--- + +# Multi-component configuration + +So far, you've learned the definition, backup, and configuration of single-component clusters (e.g., Oracle-MySQL). + +This tutorial takes NebulaGraph as an example to demonstrate how to integrate a multi-component cluster and address several common issues in multi-component configurations. You can find more details in [this repository](https://github.com/apecloud/kubeblocks/tree/main/deploy/nebula). + +## Before you start + +- Finish [Tutorial 1](./how-to-add-an-add-on.md). +- Knowledge about basic KubeBlocks concepts, such as ClusterDefinition, Cluster, ComponentRef, and Component. + +## NebulaGraph Architecture + +First, take a look at the overall architecture of NebulaGraph. + +NebulaGraph applies the separation of storage and computing architecture and consists of three services: the Graph Service, the Meta Service, and the Storage Service. The following figure shows the architecture of a typical NebulaGraph cluster. + +![NebulaGraph Architecture (source: https://github.com/vesoft-inc/nebula)](./../../img/nebula-aichitecture.png) + +- Metad: It is a component based on the Raft protocol and is responsible for data management tasks such as Schema operations, cluster management, and user permission management. 
+- Graphd: It is the compute component and is responsible for handling query requests, including query parsing, validation, and generating and executing query plans. +- Storaged: It is the distributed storage component based on Multi Group Raft, responsible for storing data. + +If the client is considered, the fourth component is: + +- Client: It is a stateless component used to connect to Graphd and send graph queries. + +## Configure cluster typology + +Now you've learned the four components of NebulaGraph, and how each component is started and configured. + +Similar to a single-component cluster, you can quickly assemble the definition for a multi-component cluster. + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterDefinition +metadata: + name: nebula +spec: + componentDefs: + - name: nebula-console # client + workloadType: Stateless + characterType: nebula + podSpec: ... + - name: nebula-graphd # graphd + workloadType: Stateful + podSpec: ... + - name: nebula-metad # metad + workloadType: Stateful + podSpec: ... + - name: nebula-storaged # storaged + workloadType: Stateful + podSpec: ... +--- +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterVersion +metadata: + name: nebula-3.5.0 +spec: + clusterDefinitionRef: nebula # clusterdef name + componentVersions: + - componentDefRef: nebula-console # Specify image for client + versionsContext: + containers: + - name: nebula-console + image: ... + - componentDefRef: nebula-graphd # Specify image for graphd + versionsContext: + containers: + - name: nebula-graphd + image: + - componentDefRef: nebula-metad # Specify image for metad + versionsContext: + containers: + - name: nebula-metad + image: ... + - componentDefRef: nebula-storaged # Specify image for storaged + versionsContext: + containers: + - name: nebula-storaged + image: ... +``` + +The above YAML file provides an outline of the ClusterDefinition and ClusterVersion for NebulaGraph. 
Corresponding to Figure 1., four components (including the client) and their version information are specified. + +If each component can be started independently, the information provided in Figure 2. would be sufficient. + +However, it can be observed that in a multi-component cluster, there are often inter-component references. So, how to specify the references thereof? + +## Configure inter-component references + +As discovered, components may refer to each other and Figure 3. shows the inter-component references in a NebulaGraph cluster. For example, + +1. Nebula-Console needs to know the port number and service name of Nebula-Graphd. +2. Nebula-Graphd needs to know the DNS of each Pod of Nebula-Metad. +3. Nebula-Storaged also needs to know the DNS of each Pod of Nebula-Metad. + +![Nebula Inter-Component References](./../../img/nebula-inter-component-ref.png) + +Therefore, three common types of inter-component references are: \ + +1. **Service Reference** + e.g., Nebula-Console needs to obtain the service name of Nebula-Graphd. +2. **HostName Reference** + e.g., Nebula-Graphd needs to configure the DNS of all Pods of Nebula-metad. This reference typically points to a stateful component. +3. **Field Reference** + e.g., Nebula-Console needs to obtain a service port name of Nebula-Graphd. + +To ensure that the cluster starts normally, the above information needs to be injected into the Pod through environment variables (whether it is loaded through configmap or defined as pod env). + +In KubeBlocks, the `ComponentDefRef` API can be used to achieve the goal. It introduces the following APIs: + +- `componentDefName`, used to specify the name of the component definition that is being referenced to. +- `componentRefEnv`, which defines a set of environment variables that need to be injected. + - `name` defines the name of the injected environment variable. + - `valueFrom` defines the source of the variable value. 
+
+Next, you will learn how `ComponentDefRef` deals with the three types of references mentioned above.
+
+### Service Reference
+
+Case 1: Nebula-Console needs to obtain the service name of Nebula-Graphd.
+
+When defining `nebula-console`, add the following definitions (as `componentDefRef` shows):
+
+```yaml
+  - name: nebula-console
+    workloadType: Stateless
+    characterType: nebula
+    componentDefRef:
+      - componentDefName: nebula-graphd
+        componentRefEnv:
+          - name: GRAPHD_SVC_NAME
+            valueFrom:
+              type: ServiceRef
+```
+
+- Specify the component that is being referenced to as `nebula-graphd`.
+- The name of the injected environment variable is `GRAPHD_SVC_NAME`.
+- The value type of the variable is `ServiceRef`, indicating that the value comes from the service name of the referenced component.
+
+:::note
+
+In KubeBlocks, if you've defined the `service` for a component, when you create a cluster, KubeBlocks will create a service named `{clusterName}-{componentName}` for that component.
+
+:::
+
+### HostName Reference
+
+Case 2: Nebula-Graphd needs to configure the DNS of all Pods of Nebula-Metad.
+
+```yaml
+  - name: nebula-graphd
+    workloadType: Stateful
+    componentDefRef:
+      - componentDefName: nebula-metad
+        componentRefEnv:
+          - name: NEBULA_METAD_SVC
+            valueFrom:
+              type: HeadlessServiceRef
+              format: $(POD_FQDN):9559 # Optional, specify value format
+```
+
+- Specify the component that is being referenced to as nebula-metad.
+- The name of the injected environment variable is NEBULA_METAD_SVC.
+- The value type of the variable is HeadlessServiceRef.
+  - It indicates that the value comes from the FQDN of all Pods of the referenced component, and multiple values are connected with `,` by default.
+  - If the default FQDN format does not meet your needs, customize the format through `format` (as shown in Line 9).
+ +:::note + +KubeBlocks provides three built-in variables as placeholders and they will be replaced with specific values when the cluster is created: +- ${POD_ORDINAL}, which is the ordinal number of the Pod. +- ${POD_NAME}, which is the name of the Pod, formatted as `{clusterName}-{componentName}-{podOrdinal}`. +- ${POD_FQDN}, which is the Fully Qualified Domain Name (FQDN) of the Pod. + +In KubeBlocks, each stateful component has a Headless Service named `headlessServiceName = {clusterName}-{componentName}-headless` by default. + +Therefore, the format of the Pod FQDN of each stateful component is: +`POD_FQDN = {clusterName}-{componentName}-{podIndex}.{headlessServiceName}.{namespace}.svc`. + +::: + +### Field Reference + +Case 3: Nebula-Console needs to obtain a service port name of Nebula-Graphd. + +When defining `nebula-console` , add the following configurations (as `componentDefRef` shows): + +```yaml + - name: nebula-console + workloadType: Stateless + characterType: nebula + componentDefRef: + - componentDefName: nebula-graphd + componentRefEnv: + - name: GRAPHD_SVC_PORT + valueFrom: + type: FieldRef + fieldPath: $.componentDef.service.ports[?(@.name == "thrift")].port +``` + +- Specify the component that is being referenced to as `nebula-graphd`. +- The name of the injected environment variable is `GRAPHD_SVC_PORT`. +- The value type of the variable is `FieldRef`, indicating that the value comes from a certain property value of the referenced component and is specified by `fieldPath`. + +`fieldPath` provides a way to parse property values through JSONPath syntax. +When parsing JSONPath, KubeBlocks registers two root objects by default: + +- **componentDef**, the componentDef object being referenced. +- **components**, all components corresponding to the componentDef in the created cluster. 
+ +Therefore, in `fieldPath`, you can use `$.componentDef.service.ports[?(@.name == "thrift")].port` to obtain the port number named `thrift` in the service defined by this component. + +## Summary + +This tutorial takes NebulaGraph as an example and introduces several types and solutions of inter-component references. + +In addition to NebulaGraph, engines like GreptimDB, Pulsar, RisingWave and StarRocks also adopt `componentDefRef` API to deal with component references. You can also refer to their solutions. + +For more information about the `componentDefRef`, refer to [ComponentDefRef API](https://kubeblocks.io/docs/release-0.6/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1alpha1.ComponentDefRef). + +## Appendix + +### A1. YAML tips + +Since Nebula-Graphd, Nebula-Metad and Nebula-Storaged all require the FQDN of each Pod in Nebula-Metad, you don't need to configure them repeatedly. + +Quickly configure them with YAML anchors. + +```yaml +- name: nebula-graphd + # ... + componentDefRef: + - &metadRef # Define an anchor with `&` + componentDefName: nebula-metad + componentRefEnv: + - name: NEBULA_METAD_SVC + valueFrom: + type: HeadlessServiceRef + format: $(POD_FQDN){{ .Values.clusterDomain }}:9559 + joinWith: "," +- name: nebula-storaged + componentDefRef: + - *metadRef # Use the anchor with `*` to avoid duplication +``` diff --git a/docs/developer_docs/integration/parameter-configuration.md b/docs/developer_docs/integration/parameter-configuration.md new file mode 100644 index 00000000000..d56cb7b2989 --- /dev/null +++ b/docs/developer_docs/integration/parameter-configuration.md @@ -0,0 +1,353 @@ +--- +title: Parameter configuration +description: How to configure parameter templates and update parameters in KubeBlocks +keywords: [parameter configuration] +sidebar_position: 5 +sidebar_label: Parameter configuration +--- + +# Parameter configuration + +This tutorial takes Oracle MySQL as an example and explains how to configure parameter templates and 
parameters in KubeBlocks. You can find [the full PR here](https://github.com/apecloud/learn-kubeblocks-addon/tree/main/tutorial-3-config-and-reconfig/).
+
+## Before you start
+
+1. Grasp basic concepts of Kubernetes, such as Pod and ConfigMap.
+2. Finish configurations in [Configure parameter template](./parameter-template.md).
+3. (Optional) Know something about Go Template.
+4. (Optional) Know something about CUE Lang.
+
+## Introduction
+
+KubeBlocks adds configurations by mounting the ConfigMap to the volume. With the Kubernetes-Native concept that `ConfigMap is the only source of truth`, it centralizes the entry for parameter changes in the ConfigMap to prevent configuration drift. Therefore, the order below illustrates how KubeBlocks performs parameter reconfiguration:
+
+1. Configure parameter values in the ConfigMap.
+2. Derive parameter configurations (add/delete/update) based on ConfigMap modifications.
+3. Apply the parameter configurations to the engine.
+
+Different parameters require different configuration methods:
+
+- Static parameters require a cluster restart (cold update).
+- Dynamic parameters require a parameter refresh (hot update).
+
+Table 1 lists four common hot update methods, including UNIX Signal, SQL, Auto, etc. Currently, engines in KubeBlocks can implement one or more of these methods. For example, to apply dynamic configuration in PostgreSQL, you can use:
+
+- UNIX Signal: Send a `SIGHUP` signal.
+- Tools: Call the `pg_ctl` command.
+- SQL: Execute SQL statements to directly update parameters.
+
+:paperclip: Table 1. Summary of Parameter Hot Updates
+
+| Methods | Descriptions | Applicability |
+| :---------- | :----------- | :------------ |
+| Unix Signal | For example, PostgreSQL.<br/>
If you need to reload the configuration file after parameter configuration, send a `SIGHUP` signal to PG. | Applicable to engines that support Unix Signal updates. | +| SQL | For example, MySQL.
Perform parameter configurations through the SQL statement `SET GLOBAL =`. | Applicable to most RDBMS engines.
**Note**: The `execSQL` interface is required. Currently, KubeBlocks only supports MySQL and PostgreSQL. | +| Tools | For example, Redis or MongoDB.
Related tools are provided for configuring parameters. | Implemented via custom scripts or local tools, highly versatile. | +| Auto | The engine itself watches for changes in configuration files, and updates automatically when a change is detected. | Dependent on whether the engine supports automatic loading. | + +As mentioned in [Parameter template](./parameter-template.md), Kubernetes does not synchronously update ConfigMap changes to the Pod. For KubeBlocks, it not only needs to distinguish the way parameters are configured but also needs to watch whether the corresponding configurations are synchronized to the Pod. + +Now take a look at how KubeBlocks manages parameter configurations through the `ConfigConstraint` API. + +## ConfigConstraint + +As a multi-engine platform, KubeBlocks needs to get the following information to better support parameter configuration: + +1. Format of configuration files: + + Different configuration files have different structures. KubeBlocks parses files based on their structure to deduce the information about each configuration (add/delete/update). + +2. Effect scope of parameters: + + Be clear which parameters are dynamic, which are static, and which are immutable. KubeBlocks specifies the effect scope of parameters to determine how they quickly take effect. + +3. Methods for dynamic parameter changes: + + As shown in Table 1., parameters can be dynamically configured in various ways. Therefore, specify different dynamic configuration methods for different engines. + +4. Definition of parameter validation rules: + + It's important to define validation rules for parameters. In a production environment, developers often fail to start databases due to typos in parameter values. Parameter validation therefore adds a layer of protection by performing checks in advance to prevent such mistakes. + +:::note + +KubeBlocks creates a config-manager sidecar for components that have configured ConfigConstraint. 
It is used to detect file updates, send signals, and execute updated scripts. + +::: + +The information is included in `ConfigConstraint` (parameter constraints) as shown below. The four sections correspond to the four key configuration details mentioned above. + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ConfigConstraint +metadata: + name: oracle-mysql-config-constraints +spec: + #1. Specify the file format as INI and only focus on the `mysqld` section + formatterConfig: + format: ini + iniConfig: + sectionName: mysqld + + #2. Specify the dynamic parameter configuration method for MySQL, using `reload-script` to execute SQL statements + reloadOptions: + tplScriptTrigger: + sync: true + # Specify which script file to use for configuration + scriptConfigMapRef: oracle-mysql-reload-script + namespace: {{ .Release.Namespace }} + + ##3.1 Configure static parameters + staticParameters: + - open_files_limit + - performance_schema + - enforce_gtid_consistency + + ##3.2 Configure dynamic parameters + dynamicParameters: + - innodb_buffer_pool_size + - max_connections + - gtid_mode + + ##4. Define parameter validation rules with a CUE template + cfgSchemaTopLevelName: MysqlParameter + configurationSchema: + cue: |- + {{- .Files.Get "config/oracle-mysql-config-constraint.cue" | nindent 6 }} +``` + +Each API is to be explained in the following tutorial. + +### FormatterConfig + +FormatterConfig describes the configuration file format, such as `ini`, `yaml`, `json`, `xml`, `properties`. + +The file itself is just a text and requires different parsers. + +When KubeBlocks detects a configuration file change, it deduces the parameter configuration (add/delete/update) based on the format and notifies the Pod to update. + +For example, MySQL's adjustable parameters take the `ini` format and only parse the `mysqld` information. 
+
+```bash
+  formatterConfig:
+    format: ini  # Format of the configuration file; ini, xml, yaml, json and hcl are supported
+    iniConfig:
+      sectionName: mysqld  # If the ini format is adopted, there might be multiple sections and sectionName is required
+```
+
+### ReloadOptions
+
+ReloadOptions describes the method of dynamic parameter configuration.
+
+Table 1 above summarizes 4 common methods of dynamic parameter configuration. KubeBlocks, accordingly, supports multiple configuration methods.
+
+- tplScriptTrigger: Configures parameters by template files.
+- shellTrigger: Configures parameters by executing scripts.
+- unixSignalTrigger: Configures parameters through UNIX Signal.
+- None: AutoLoad mode, which is automatically configured by the database engine.
+
+***Example***
+
+- tplScriptTrigger
+
+  This example chooses `tplScriptTrigger` to configure parameters by defining the content in the template file.
+
+  ```bash
+  reloadOptions:
+    tplScriptTrigger:  # Configure parameters by template file
+      sync: true  # Synchronous reloading
+      scriptConfigMapRef: oracle-mysql-reload-script  # The referenced template file
+      namespace: {{ .Release.Namespace }}
+  ```
+
+- shellTrigger
+
+  `shellTrigger` performs dynamic parameter configuration by shell scripts, which is a general method since most databases support configuring parameters through clients.
+
+  ```yaml
+  reloadOptions:
+    shellTrigger:
+      sync: true
+      command:
+        - "update-dynamic-config.sh"
+  ```
+
+:::note
+
+The scripts in ReloadOptions will be loaded to the Pod and executed by the config-manager sidecar mentioned before.
+
+:::
+
+### Static/Dynamic Parameters
+
+KubeBlocks supports configuring dynamic, static and immutable parameters. Such effect scope is used to identify the parameter type and to determine how the parameter reconfiguration takes effect.
+
+KubeBlocks includes multiple parameter reloading strategies and applies the appropriate strategy based on the reconfiguration contents.
+
+This example lists some common MySQL parameters, such as the static parameter `performance_schema` and the dynamic parameter `max_connections`.
+
+If the parameter list is too long, it is recommended to use the `.Files.Get` function.
+
+```yaml
+  ##3.1 Configure static parameter list
+  staticParameters:
+    - open_files_limit
+    - performance_schema
+    - enforce_gtid_consistency
+
+  ##3.2 Configure dynamic parameter list
+  dynamicParameters:
+    - innodb_buffer_pool_size
+    - max_connections
+    - gtid_mode
+```
+
+### ConfigurationSchema
+
+During the configuration process, starting a cluster may fail due to entering an invalid parameter value.
+
+KubeBlocks provides ConfigurationSchema for validating parameter effectiveness. KubeBlocks uses CUE for verification. It works by describing the type, default value and range of each parameter to prevent problems caused by an invalid parameter value.
+
+This example illustrates the configuration for verifying MySQL parameter values.
+
+```yaml
+#MysqlParameter: {
+
+    // Sets the autocommit mode
+    autocommit?: string & "0" | "1" | "OFF" | "ON"
+
+    open_files_limit: int | *5000
+
+    // Enables or disables the Performance Schema
+    performance_schema: string & "0" | "1" | "OFF" | "ON" | *"0"
+
+    // The number of simultaneous client connections allowed.
+    max_connections?: int & >=1 & <=100000
+    ...
+ }
+```
+
+For example, the snippet above defines some constraints for the parameter `performance_schema` in MySQL.
+
+- Type: string
+- Available values: ON, OFF, 0, 1
+- Default value: 0
+
+```yaml
+    // Enables or disables the Performance Schema
+    performance_schema: string & "0" | "1" | "OFF" | "ON" | *"0"
+```
+
+## How to configure parameters
+
+For a better user experience, KubeBlocks offers kbcli for your convenient parameter management.
+
+### Create a cluster
+
+Both kbcli and Helm are supported.
+ + + + + +```bash +kbcli cluster create mycluster --cluster-definition='oracle-mysql' --cluster-version oracle-mysql-8.0.32 +``` + + + + + +```bash +helm install oracle-mysql path-to-your-helm-chart/oracle-mysql +``` + + + + + +### View parameter configuration + +View the detailed configuration of a cluster, including the configuration template name and constraint name. + +```bash +kbcli cluster describe-config mycluster +> +ConfigSpecs Meta: +CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER +mysql-config my.cnf true oracle-mysql-config-template oracle-mysql-config-constraints mycluster-mysql-comp-mysql-config mysql-comp mycluster + +History modifications: +OPS-NAME CLUSTER COMPONENT CONFIG-SPEC-NAME FILE STATUS POLICY PROGRESS CREATED-TIME VALID-UPDATED +``` + +### Configure parameters + +For example, configure the max_connection of MySQL. + +Based on the above configuration, + +- max_connection is a dynamic parameter. +- The value range is [1, 10000]. + +:::note + +For KubeBlocks v0.6.0 and above, run `kbcli cluster edit-config` to configure the parameter. + +::: + +```bash +kbcli cluster edit-config mycluster +``` + +In the interactive editing interface, edit max_connection as 1000. + +![Interactive Config Editor](./../../img/addon-interactive-config-editor.png) + +Save the changes and confirm the information to realize the parameter reconfiguration. + +![Confirm Config Changes](./../../img/addon-confirm-config-changes.png) + +### View the change history + +View the parameter configurations again. Besides the parameter template, the history and detailed information are also recorded. 
+ +```bash +kbcli cluster describe-config mycluster +> +ConfigSpecs Meta: +CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER +mysql-config my.cnf true oracle-mysql-config-template oracle-mysql-config-constraints mycluster-mysql-comp-mysql-config mysql-comp mycluster + +History modifications: +OPS-NAME CLUSTER COMPONENT CONFIG-SPEC-NAME FILE STATUS POLICY PROGRESS CREATED-TIME VALID-UPDATED +mycluster-reconfiguring-7p442 mycluster mysql-comp mysql-config my.cnf Succeed 1/1 Aug 25,2023 18:27 UTC+0800 {"my.cnf":"{\"mysqld\":{\"max_connections\":\"1000\"}}"} +``` + +## Reference + +- [Configure a Pod to Use a ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) +- [CUE Lang Overview](https://cuetorials.com/zh/overview/) +- [KubeBlocks ApeCloud MySQL Configuration](https://kubeblocks.io/docs/preview/user_docs/kubeblocks-for-mysql/configuration) + +## Appendix + +### A.1 How to view the reconfiguration process + +Parameter configuration is a type of KubeBlocks operations, shorten as ops. + +After the kbcli reconfiguration command is performed, a Configuration ops is generated in KubeBlocks. + +As shown in Section 3.5, an ops named `mycluster-reconfiguring-7p442` is generated and you can run the command below to view the process, including the changes, policy and time. 
+ +```bash +kbcli cluster describe-op +``` + +### A.2 Compare the difference between two changes + +Run `diff-config` to view the difference between two changes + +```bash +kbcli cluster diff-config +``` diff --git a/docs/developer_docs/integration/parameter-template.md b/docs/developer_docs/integration/parameter-template.md new file mode 100644 index 00000000000..bcea62da59e --- /dev/null +++ b/docs/developer_docs/integration/parameter-template.md @@ -0,0 +1,230 @@ +--- +title: Parameter template +description: How to configure parameter templates in KubeBlocks +keywords: [parameter template] +sidebar_position: 4 +sidebar_label: Parameter template +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Parameter template + +This tutorial demonstrates how to configure parameter templates in KubeBlocks with Oracle MySQL as an example. You can find [the full PR here](https://github.com/apecloud/learn-kubeblocks-addon/tree/main/tutorial-3-config-and-reconfig/). + +## Before you start + +1. Grasp basic concepts of Kubernetes, such as Pod and ConfigMap. +2. Finish [Tutorial 1](./how-to-add-an-add-on.md). +3. Know something about Go Template (Optional). + +## Introduction + +When creating a cluster, developers typically configure parameters according to resource availability, performance needs, environment, etc. Cloud database providers like AWS and Alibaba Cloud have therefore offered various parameter templates (such as high-performance and asynchronous templates for RDS) to facilitate a quick startup for users. + +In this tutorial, you will learn how to configure parameters in KubeBlocks, which includes adding parameter templates, configuring parameters, and configuring parameter validation. + +Although Kubernetes allows users to mount parameter files as ConfigMap on volumes of the Pod, it only manages ConfigMap updates and synchronizes them to the volume. 
Therefore, if the database engine (such as MySQL and Postgres) fails to support dynamic loading of configuration files, you can only log in to the database to perform update operations, which can easily lead to configuration drift. + +To prevent that, KubeBlocks manages all parameters through ConfigMap with the principle that `ConfigMap is the only source-of-truth`. It means that all parameter configurations are first applied to ConfigMap, and then, depending on different ways the parameters take effect, applied to each Pod in the cluster. A comprehensive guide on how to configure parameters will be provided in the next tutorial. + +## ConfigTemplate + +KubeBlocks renders parameter templates with ***Go Template***. Apart from common functions, it also includes some frequently-used calculation functions such as `callBufferSizeByResource` and `getContainerCPU`. + +With KubeBlocks's enhanced rendering capabilities, you can quickly create an ***Adaptive ConfigTemplate*** and generate appropriate configuration files based on the context, such as memory and CPU. + +### Add a parameter template + +```yaml +1 apiVersion: v1 +2 kind: ConfigMap +3 metadata: +4 name: oracle-mysql-config-template +5 labels: +6 {{- include "oracle-mysql.labels" . | nindent 4 }} +7 data: +8 my.cnf: |- +9 {{` +10 [mysqld] +11 port=3306 +12 {{- $phy_memory := getContainerMemory ( index $.podSpec.containers 0 ) }} +13 {{- $pool_buffer_size := ( callBufferSizeByResource ( index $.podSpec.containers 0 ) ) }} +14 {{- if $pool_buffer_size }} +15 innodb_buffer_pool_size={{ $pool_buffer_size }} +16 {{- end }} +17 +18 # If the memory is less than 8Gi, disable performance_schema +19 {{- if lt $phy_memory 8589934592 }} +20 performance_schema=OFF +21 {{- end }} +22 +23 [client] +24 port=3306 +25 socket=/var/run/mysqld/mysqld.sock +26 ` +27 }} +``` + +The above example illustrates an adaptive ConfigTemplate for MySQL defined through ConfigMap. 
It includes several common MySQL parameters, such as `port` and `innodb_buffer_pool_size`.
+
+Based on the memory parameter configured when the container is started, it can:
+
+- Calculate the size of `innodb_buffer_pool_size` (Lines 12 to 16);
+- Disable `performance_schema` when the memory is less than 8Gi to reduce performance impact (Lines 19 to 21).
+
+`callBufferSizeByResource` is a predefined bufferPool calculation rule, primarily for MySQL. You can also customize your calculation formulas by querying memory and CPU:
+
+- `getContainerMemory` retrieves the memory size of a particular container in the Pod.
+- `getContainerCPU` retrieves the CPU size of a particular container in the Pod.
+
+:::note
+
+Tailor additional parameter calculation options as you wish:
+
+- Calculate an appropriate `max_connections` value based on memory size.
+- Calculate reasonable configurations for other components based on the total memory available.
+
+:::
+
+### Use a parameter template
+
+#### Modify ClusterDefinition
+
+Specify parameter templates through `configSpecs` in `ClusterDefinition` and quote the ConfigMap defined in [Add a parameter template](#add-a-parameter-template).
+
+```yaml
+  componentDefs:
+    - name: mysql-compdef
+      configSpecs:
+        - name: mysql-config
+          templateRef: oracle-mysql-config-template  # Defines the ConfigMap name for the parameter template
+          volumeName: configs  # Name of the mounted volume
+          namespace: {{ .Release.Namespace }}  # Namespace of the ConfigMap
+      podSpec:
+        containers:
+          - name: mysql-container
+            volumeMounts:
+              - mountPath: /var/lib/mysql
+                name: data
+              - mountPath: /etc/mysql/conf.d  # Path to the mounted configuration files, engine-related
+                name: configs  # Corresponds to the volumeName on Line 6
+            ports:
+            ...
+```
+
+As shown above, you need to modify the `ClusterDefinition.yaml` file by adding `configSpecs`. Remember to specify the following:
+
+- templateRef: The name of the ConfigMap where the template is.
+- volumeName: The name of the volume mounted to the Pod. +- namespace: The namespace of the template file (ConfigMap is namespace-scoped, usually in the namespace where KubeBlocks is installed). + +#### View configuration info + +When a new cluster is created, KubeBlocks renders the corresponding ConfigMap based on configuration templates and mounts it to the `configs` volume. + +1. Install a Helm chart. + + ```bash + helm install oracle-mysql path-to-your-helm-char/oracle-mysql + ``` + +2. Create a cluster. + + ```bash + kbcli cluster create mycluster --cluster-definition oracle-mysql --cluster-version oracle-mysql-8.0.32 + ``` + +3. View configuration. + + kbcli provides the subcommand `describe-config` to view the configuration of a cluster. + + ```bash + kbcli cluster describe-config mycluster --component mysql-compdef + > + ConfigSpecs Meta: + CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER + mysql-config my.cnf false oracle-mysql-config-template mycluster-mysql-compdef-mysql-config mysql-compdef mycluster + + History modifications: + OPS-NAME CLUSTER COMPONENT CONFIG-SPEC-NAME FILE STATUS POLICY PROGRESS CREATED-TIME VALID-UPDATED + ``` + +You can view: + +- Name of the configuration template: oracle-mysql-config-template +- Rendered ConfigMap: mycluster-mysql-compdef-mysql-config +- Name of the file loaded: my.cnf + +## Summary + +This tutorial introduces how to render "adaptive" parameters with configuration templates in KubeBlocks. + +In Kubernetes, ConfigMap changes are periodically synchronized to Pods, but most database engines (such as MySQL, PostgreSQL, and Redis) do not actively load new configurations. This is because modifying ConfigMap alone does not provide the capability to Reconfig (parameter changes). + +## Appendix + +### A.1 How to configure multiple parameter templates? + +To meet various requirements, developers often need to configure multiple parameter templates in a production environment. 
For example, Alibaba Cloud provides many high-performance parameter templates and asynchronous templates for customized needs.
+
+In KubeBlocks, developers can use multiple `ClusterVersion` to achieve their goals.
+
+$$Cluster = ClusterDefinition.yaml \Join ClusterVersion.yaml \Join Cluster.yaml
+$$
+
+The JoinKey is the Component Name.
+
+As the cluster definition formula indicates, multiple ClusterVersion can be combined with the same ClusterDefinition to set up different configurations.
+
+```yaml
+## The first ClusterVersion uses the configurations from ClusterDefinition
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ClusterVersion
+metadata:
+  name: oracle-mysql
+spec:
+  clusterDefinitionRef: oracle-mysql
+  componentVersions:
+  - componentDefRef: mysql-compdef
+    versionsContext:
+      containers:
+        - name: mysql-container
+          ...
+---
+## The second ClusterVersion defines its own configSpecs and overrides the configuration of ClusterDefinition
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ClusterVersion
+metadata:
+  name: oracle-mysql-perf
+spec:
+  clusterDefinitionRef: oracle-mysql
+  componentVersions:
+  - componentDefRef: mysql-compdef
+    versionsContext:
+      containers:
+        - name: mysql-container
+          ...
+    # The name needs to be consistent with that of the ConfigMap defined in ClusterDefinition
+    configSpecs:
+      - name: mysql-config
+        templateRef: oracle-mysql-perf-config-template
+        volumeName: configs
+```
+
+As shown above, two ClusterVersion objects are created.
+
+The first one uses the default parameter template (without any configuration), and the second one specifies a new parameter template `oracle-mysql-perf-config-template` through `configSpecs`.
+ +When creating a cluster, you can specify `ClusterVersion` to create clusters with different configurations, such as: + +```bash +kbcli cluster create mysqlcuster --cluster-definition oracle-mysql --cluster-version oracle-mysql-perf +``` + +:::note + +KubeBlocks merges configurations from ClusterVersion and ClusterDefinition via `configSpecs.name`. Therefore, make sure that `configSpecs.name` defined in ClusterVersion matches the name defined in ClusterDefinition. + +::: diff --git a/docs/img/addon-confirm-config-changes.png b/docs/img/addon-confirm-config-changes.png new file mode 100644 index 0000000000000000000000000000000000000000..634c09887c4fa6debb9ef68f421c035a7583404f GIT binary patch literal 136785 zcmYhiby!s0`#p>ef`H^82o5a_F@!Ye03t2jIW$Ojigbx|i-6S7-5@peAl)594&C+6 z=lkIAde1*I7jrmg?|a9+*1FdY`79^#;`yuR7#J8YBtMBMVqiQ$e|?CH`4IhK#>}IO zf$AyWNTqUmhvFcXdyLVa$(+3QJ*%f&kF!LzD^C17Jf-jihA(k)rdq>Qp6~c+V$Z$ECj9iks z2mgD@pPvSa51wLplB$!$8%M&VVr^L+N4T)X#olH<7P@&O#jZ*$j;R15dAXt<1-$uR z{4fNA^q&GS_I17*3plJFxiO#&1)`bk&ArbzS0F^spIk9RFtaI!+5Ye84pj z6785m5&3Dkqr1*(9j+%epMO95@7I0B3)aW_I~1bJqfKQkZf$jlImu5ykCpjI9|{Fn z*U$m9Dr5)=#r7*G{<|Lo!!t~G2O~VKmKlW2=aAAi7bNl4UVhz^;KoBJS4=y`;k52N@BCJBEj zX_1!?V`avBb`zp(0**?I5aWY7<9PR5y?q?Fu`Pi+p`vkMl>a# zwr&ze)83rUURb>gdh7MHAPTG1h5}VmQp_b2W>?%joZawfVsEZ1`mtrk|HTVEiTp^p zF&ZQlm0~L-t~_R@F+Y+(3F3H>XK+eyubZ46kXZMMgg{BolP2BGkYW{j|MMnY$>?rQ z?T0M2wL%>H?*vSfOr*VZOqFN?>-7E$%b&hg@fUyEG(LI4?iPK&f3N#=Jnh%2{Eqzj zW4WEZ-EFfKZBMt|Y54K>$L3+T3>1&tTUf;Qk6o&1GA*3&2zeIJ??X4S>eSm#X$3{z zG@BM;GD5?|^^<3!{`hn3@4nRa@Xif%YY9e-ol#fkS$75i$b8K1Cb@{kQC1eSnVk27 zb8hVpe8dh{Pc#uXHdb>`;!n8RS}&@b;uAAB0xOQ5;K`-WP7J#`m@y{Fl@xH$f%OIv2VrFy45y$=waP1$B zfc1u6_CA-4=9s$fxNa06YnUyEHO}53(zGJ7hvD{qRiYm-$COo65`wVk9fl?i1yN@g z5N|!En1S^pW}BCg__bc!U;(8L03pJSE6z}wxKWC;7Qz^+sHI@7X=QDopc)r=SzoUI zo~J)Qpi_pv!n=t#t3x zS+gs@*XDea@;fyZRp(z>1C^lgA7T?EwbZ;RgL<_dgO#927^Rtwl9AgsdB=m2GY^LE z;!2wZkIt~DuQXG%EG*?HHg4wB4&ojqJ%%x1JFr^gzF^>-(#~L1QqtKg5H(;2VlC|YU^Ug{r!g`$_q+D<6gF#K85e9rXN 
zV97jyf=u)q(Dx->hHYZQl~tN^j`@7E3l)~HQGP}_)9P6f_V8%{FB^7%yO!4Blbe{n ziS=C5AGAUgK8Za@U4hvaE3?}NGAtj3 z@7ANez=&MxFcI&of%?$Ua(zaEu=sKzx*Ca(B_e>{oRQNi#t(AY%p;X@RDyqJhB<0V zYscvBJF{q9VG>WU_db73ZW`4*lSTkhBHAjX7=PigJ6PIT*OMrd4`a%%|IEBcm<$V- zk;VJ|-9UprImnd@)R(VpAqzAM4}lS=s3hbjf=m zD?`UgCbdc5N(hBb&AAg9Uuos|Z`6B3Z84=sDMNCMu{#6QQx#wByQ{h+%4Ap6rB=+T zz3D68n=ouw8o4k86YcjTtHPvS{0qNOex-OkSawA2bQkZuF`Uy>{NPoar16E}K|x9l zOZ$d}<*v0gM^aT;_J}%TwC73dV*iddFNN^X1heEY{aRRBJVzV}L5Q>q>Wh+zn^0rr zqT7nVes3(Yo~l0Rxg9G#lC%t8E^sY7o}5H_#!WpqqKl>o#kzPV8OsVM3Cd-W1P?dkmE}_AN1LymwuCVI#D2|p%$wIR*PrxZfP4*$7 zVN03WhT&X>ghx-%YT&kyS1H&{^DZ!hSo2QRlbmz=3gseuLf|Yy7w851xT0d z-P)%_%uxEhH-yS(Un)l;qdZ)|kaAFq_s%B?uepu>{-&V=S9(hg5@=yL=(=??ra@O7 z_rKO)ESO7g4)KXG#32MPU;9s**L?=9Vn>>7Qh<##Kv6Qgc{6DaAGNq}!tJ_|TDYm& zm>`9g2^xQWXLX(f63#(+vEtA8+Ge5ks7hdD&X>5;bfR!DPycjsGJFfR`RN#MLEEsC zI@(B#okLbz_oM#Qza&{$WpYU*u;e^*Ds4vqz0T{d*Ty6xybM;@5&mr-CM!YB;#7s+ zxgeN^dhjGJy5G&^!$#x%hDHfG#ec|~ZCl{XPHA8AwGqpOIhop@L(?R_`Tr`XIKARY zdjOYyyaM9( z?FmEl^*9FF>1ru_pp#6@DFqp5c({K4ue})i58Z%+531YKXgi~OGJ5jculN^u-OqCd zNoDB@+Lma9<|tJf_ynVSjoz^qtZNtMgR02TP*6iXo)%F@D_KqphBHBc2tGU7;iSxB zsFc!Xe^c2(jMvqI+uBB2xh+?cbzbq7g}Ibbxr*_U)0|Xnd_n*gFdW=6u`O3O9vf%O z9l`tTgLuqa%ePMjl3cyM5<~Xa-m^Z=bQC6$HmP8#s$#&StQFvg*XF&&P7`+g9JS_I zR=dezwN&^nJ+hP3une9IbCGL2v~~WDZKS^5Z`k2Z-H>&U6c_7`Ay4&5k;~UbGmls1 zPsh&?sih!#1q-mVrR!=+N=;eCBexSlM)+hwzAl5U^)!(6HBsU1{!W34F|ggSz+lF! 
z(c`c-Mfyf}uf_B7qPv^%)b%l34K>$=c~(`ECN#_D&-e_J0l+Y}k-bi@Vv%_P5Oqu-;f!0J)9SL-Ce6dgQPYkp}!FMZyt^{6fduXN@ONM4E zirnq=$-p~2(%#+Efv-r4k6sMTyWpLu*PyyLv0P)y2~Ye!wD83BbSmXz(=fN0D^}-hwB6W?{kcwscLwO^>fO0bgl=a&fO-E zs~jq-ntpixBU8S65RBV#{nP>Ei}npG z>z(ZA_vjSp$a=SSl9CmpqRrTbIe5 zgZB}_`1y$a(X}h)_zS3kU0Eg9E2!Apc-q9Pe<=+4;_+fv0M-Zp+Oe3F z;mU@CZ_o6BKk$R<8*p9rj?9xmk^o@Xk&Lj9(_-tN?hR=>+7f(9S%JwZ-O-3EJO-H& z|0$dMc1>`Lj3A#ye}JmF)7aTp8#Y$c1`)6JpLtT~-cZEquYIESoEuMaMFVgc+1c9Q9Uh63F>S8*K3Y1)S)N5{ zN24Nya4x)pqt|d46W<4y=m9}L7W*lC;H{WI-kOnILizw>AW}=r{H<=WbzdSUy`iZI z9G|)9Bb-2IVtB4N93ZOw=G*Q&a(Atfj4N*BX+_+|mJA9v+TlB|X?Rk+u_g&rhMj&k zsBzQlenkHCS@dDZyxrZ~`C+_y=?5Gsuih37h9_@%4alCs;iOq*yVV)|VNQ{bdwnF& zYl6CFXB&A^!DGnXFJIPE7bqy*&TXSzxp?-l5Tsc{ovxlIpSU8Wu-g1AbQWx`&O^7oOZQ~8g<;*FjkxI}afCwhj|6tq*WS6n|Lv`r! zvaJpRPLnIKHrE@eG?IPc+61HC&PhV7gWBIm`>S?pLV_n&mLRJ~Pkj9s_KrLRNuUSH z$($xG8}CRU@y06gd{E)Yu8^&cy_3neQ+0X98ufLz&G$pHm|Z1_0S92QG6IE)1}IgU zxCIT9A3_^140TM4E9(n);^u$Wsat^wbVQ`IC(A-P9b8%6_8y9zBY2I5NdM;~7&f2sPx0mogP@a26tnU~wV9IbYzj6Vzk$bYIg{ z^U(59GyQ|?M6cq@xu~Inv!|+0X9Un zTHk!+jY6!J^9j5!4s|1^(c{I>V`Yw+KvXf6YZK$<9$iFW2Q(#XQsO8-78NRDaf&0x z9&)8mKV!6z_WGj2OQE4IrnosI>8~32O@UT-YGGtyED5nniRh=H-GVXkC%-D5C$~*8 z0O|_3uYNX_7-FYFAaWcoZ2vCk>fJ3RDW&T*33Ks(C!Ts;Z?vHK9`gWRmgS^E98+b=uR z)q;ovtf?8L`R2zb`X||+;w2bzKT^rPK|C^EYH>yGqHo?@vsBIatVth%mATCW{3i`b zTk0q<<~r^l`bd`)HP&)?)gSL}!Bc8xtFlJ_Ucv*js(Am5ur`%$p$= zy_vvcJut|IFAgvFS21?B29rdVSamq5hCBE^)ZObIJ&tHN+tCO0sVjum3k!n-uwJHG z>JGLdaAP#|bX&;MlByPVU4m6ni4J$0nYR1(VKm5d)5pS$h&!bP|lnGOhLsFwR z4v0eRufd>r>EM?TBeK5o9T^E|I5AS|Ok8G3M*n^)s+Vy*0x7tcRtRkzfpg$;(NC%55G<_j zUh|@9uR}o&3>&b}a4cNcrjpkvQp9f-I@+Y3c}lXm0qyMfiQNRmh*8vtD=XwpXpbQ^ zUqk78E+;o9p7?Cpcy*|eowtwSL&9(IHSY!7Dl6qD_wcT6>L3bAv4P{NGVBBoq5Whl z+KD-2xKE?5f@g2pIY7+GGC&P`wg|9uf`tc{(=M;Hs!ziu-aoRAp*?I5WX|1EvD6W- z+msQ1PCGl@{Y*e$_PII%a8vshwUO8=E-QY)+$2PuHUn z9x+GG#Zv8_TZ#Y84wV}4_^AGVd`d|bH}UlZ>@?D~n=?NVkUGyR8g2GxJc4CjZ!Q}P zUkgbTfzmLV0lNsn*PQ?z#Cu}UpKd_x>mOewt_YWvR{2lziP^W!}j_q0V@jwaM 
zg__$T9_j8`N?Z~qUGvoJ)f+_P7m~w^O+f+OB+mg*SrwP2Z|+@(oo#Y~-@xNe?MCAy zU2o7s%_aUb;qs@ZDyQ*BTHyn1AAd9yCzM&S z2S4VJfjS!)(A63?&TY1m>mxg|v@?RixR;G$(eFMkN~m;}`^ZdC4mW7Tq}hk6eHo(2 z%1n!tMRXotXHGf}R3AP5`Th(0zb0v^D?OQ>Yuozy2qYby0;dhpjIMOV-5-`=fulau zT9Y_2xMBg-qvIo+bbgL@5;zseVujBUHv*vLmwEGZh`VV~B8MUM>d%aLPb~|WO-+#@ znIM}4VhAQeEy!Yy>uokc2I_I)NZ=J$1VMRNVuJ^Y!bNxDNPI3U|3eE~Opl?I+LyCV z0Y2famuhy^H-_4<&9&8y0eR>>toxcUp6sSp2Fn{}D$vs&bCo5mq;ThXr!&%?lQ6mV zYqyj^5_@RlyuWZ^oA2-VN4gO5kDF3~A^!XP+?hke2I2t_1x%vn=TA&Af!1JuJE$zT zos_fCy6CKTUhuFo6DL(f!jtQ(#`1=EERL8Y<3Ot9FDR z9>6&UN|hE2Apck3ARTkYjwIGlHF9HU`sAM%3UMwCpAv#e#McP;xJsl-E;eMX^vF~f zWN+>gc5TB7l*PxCtEzJj2cQz@;=zY-15;HZtfd|G^=JIk78aI|k;$-BVf)&8koKrk z^Q6dBYZNQg`y>5bVUAa}*zvCZXd0tJ=EC{$Y<}^}hgo7fC zMsRB=2;lX_Xr1)yghOR+L1uYdfw^wFnp^T(>-}gi22@DmR}qrHNy%YZi@B3JVbf#eLM0&zmkGj2Aq=c1F2{pDfRb}M^ zQwC}LJt+rr-HL?(+I3Xn61k?A(=%I4%-+Y-1yhh#Eyzi0D;?yF`lYR!3s;oK?*Zi4Z~bFazueqzwe z1x*K@!@5s@{C**7ua>Jv=8Av%40m^X!`l2Ei?q_1?@*AQPE)8c**N4MsD}h+ZUxHv zyut59+ShsN?)>72F#v2j!k0ZqrbfqjNvEvd=`o^UTVty1z$qCMLtr!;Nmw9Ip_ez3 zfzzc6zW4n*xpWt%0Yqjb^}84*5I}=Cx1x2w9Zyr|FJkJILDx0Hu*=qQXvCvf;Pj*< z1e-YXhI!V@bg_)(jXys1gFYrH^|&3uV>=8F4lbUAaUue!rL}_{S$W=ZI&9A0T&Qa= z4p+X*h|cUr7d9%j1g%0~jP>Sn2)5r9bxxvf{=Y{y4=yeoi+>Ct9ReWLpHm|{NmfFN zp8h{!l05uU&gz1bp>Nmr_AiT2ReQWgo=4T36Q4v*2zz`J<+n23(U;Iq(c`&xh?mIn z95BKyCg;l9k%_ zy+ydLsx%0DmpU8mW`u6`%F27>@BZG_C`wyx7{K506E)O9yvn%WL)&p_GNex*$ zWzc@n!at_!*?<-)K)gO;VY`Hf6}!$nkMJXTi_b$A5^M?1l$zP@Uw`iWh4Sm%RNI=y z2NyZ}0!c{20W7`f>U2|qC;-1vQn};w4qzH+zFRFHUY6ONV)_A@k=S1@Q$6>17pi2G z^kX1CCjk{ko>Mm?yn7XG+&Hz;CU;x~GD#j^nVo<~wmMTKa;LN6DE~NtNTU@43q~CV}799WLxm5pavKj-gMAIWilNd z-A`T8)4D<%+YKhq3^jZ|?~8ctS5x;Hswj3Ki1N+BDp4h~UbeS~FP4YSX&6G!wz?KP ziki#Y3@r`cee5>W&NOld9)?A>v=Pj0dht25Bu^ZT;I$x+%sks05<87FnuP=}rzR54 zob|MPaA;-5yY#O{j5SXQokVonE*uTeOV7^gK5F}qi}29A-WqVL|0}C5qavf3nutrr zlAnMKkdm0D^UuC#ho6s#wo)pLM5&5n`}`K8hA0|K+3j2_9rn|QCgMc zxwq@-dD*;m_>dMG;w9o6lcUaskSS)=zmZw~_qio&_GowzOdfOAH_nWV@3)S^)0d|r$rP7u) 
z1P(|??Ro62mWA4~$IGzMM<-MBjH|N1W6;Ic+5Jgcd(HdA{AZ+;1G%2FkmV)hb8@Sd z_BnBHV}D$;y(X$U&7wTSN~s8Q!7I^V=w*tD=XKOa8nJEL3; ztv5)d6c_d`J(DO&>m@zW^RY+e;|itp68^48uxGnHQr>s;_YbuuSz!rx01zHOh7dm%q~J3=KKLO0?z_Y1%?rg{_PvT)sZ zHh+1Qe#4Z6(1+;Las9V~7R}Xr1izJiX52^Gr0NDU+?MEFqt^fUeWF%iB*E@n9_{EC z@ws;6*Ka#N#n%*M<+t3 zW#!CFW$BG()yi=QVS#>ltwAN%7+QdllgM1DIBL)tXmG2+#2T4hnAfDl4VN1n=v-Nr z1Z|N(*by~brYl}!V=6V{6a8JwG$R!VuU%t|?Rw(Cp}ApY9oTDFRP zATY3grAY^AJ=^4XLJ}oK^++;aCGuI)cpbURwXnQ+_buSfw*>(wuB4vZt%?%PO^_;4 z4OL{9pWxQbGn%jiF;Uqk(axHc{}PnMhd~g%UVRfsNkPObJCX zE1p*P&1Op9ruI()0^Cc|u+9lwsyCSF4Q`}LA)F+*Pv!-j3TukQqc`?D`q2a``{<(A z7N9`wOkCh>+r9hlL4HluTZ(nMf6?Qd2vfz(<)C%Z zUd7F`T712Q`Y_`w=w}k#CZ8>J7t0v2&BgY7^nPMwACP=l{-AGqVXmws{{UUnR}7(5 zAqxK+lJyL&mgD1|mtVBm2#Zk38@D}ZSwAAe=AXZLWpkf$Tz{`&^uK}?Kpre~M($zz zCz_v}Io}+&?`4-yrTArqQnz>=5{E}w143zm5M0MfG_ltUr+fU-JN?YY=hz$SwAM5f zB1S(l{`o9nuc@={vr&%*b5qz5CgLUO3=5b9Q=y!dRv@mRNM9qbB#SPoXO2AWy}zJt zn#n3r2X$i}4>wOW&-kGDif9aHFsiJ$sx%d*hH{fi)sh?KVvz>qynk5tK}S=M5Sylh zWgY7r*SX5{s8b*dkLoO5>MWqndVEwBon^!b^ILEtGsr|vHsH7FN;@2?>S>0-C zEw|lg*lU)A-6k=@yKH*k-n{+RsEo)RS3clXD~K6p&fM!N6gy6(!Abm`-LmK#v_ z#NSrUbVko-zq-MBV&%Xrlt4DS)_~f7o_BmfUFjr+jV-ms-j5S?hhnz@2P#+r7;*Ay7c1K^%PT2jq)JbsN=yC_eN3Ap`GmT*Asf7NHb<2n z4C*MQ-!oR#2!>B@P50V|fzP^U`H|bj@$j!*qN&O9v5QgpQ_B1mCTs8D!AIlu(}A$w z3s*xk!&gEf?e=rH^~tZ8L^IHvGN%K!`uv<1-v{0ECQ8}3tdQZZ#UeG_2{o1L!>}XL z;yEIj^KZ8;u6IR`UKf!-KSezfmRI0e?!j4ZEGxS$g5Eq@i4nP~k>@;RSs9VQyJ_Ij zlQk;;@lO|*A5_GMH`n;9$JMW07QExHs-UKE%a-n4#m*_;MCt$c@89mZ;!~*56$sJi z_W<(got{{H{!_o#5hvI~9T(d*e8A|;)$+PXwuOzot;yZb?C`yGVeiZm<`QzFumH4? 
z;cmC(*52L(SDG1#QP*b|dAcKtIrr+-TKq|EK=NF8S0c}%KxGMw-Y9yLO<1=%`x}X0 zva@%&xD%q?{3$hgu-<^gX(eq%f>tVE;W|&D7&)cLAh7e&Ni%UZ!pIujar>09;niw~LjC$`~(uEwN4*C7} z#nuo2nJAT{EvyA1fH+s_t7C$>z$#ogBbC+Dm@@1=>)72%5>9${cq`_41?Kq$a>&C+ zcnoPhODrQ>_LU?v)(p;^x)P!~vc^-MTwpFfzu!%&Hy>$=m4V z!?y5RTZaB^SwkFSQY0Nd&~h(KMJ2X|dvq=A^3NpgGKJ0SmoBb))-^=~+$Uqi2afBl z9miW?sCBrj8>5+qDy!am$YW34^c(~T{=@onN!JD7ugtr2wylRiSrt!R7PP$A`ZBIp zwDnst!?1?TTVyIhKbib*4wbc~Ts6*r-p+V*EuSWvsSiNt|5&$+QXWGiAL-g+5jZ$O)Lp2wkZB2&b3IYx@DO~&wpx)_2aDB~%gt_-J`x=$yU z%u>CgqLc`&C{cg2YE`W3pqJxjrsrYI=;~l=1SF^+w@On^bLvLOV|jIiG=HPoo%nP;hq4a0R5zUM()+WWKC6a8L zxs$xdfEbjKgb?<12pS-L9b8c$_W$cCXbqkpn`OjYkMDGRWT`3b#j-*{n94?k1#JC_ zr;yyUPOvGXBkrbQ{Dhfk=P`66Ey4-5oN8Jzb0HW`bz|e?kSi0*MzLiEs{sk~f(OF* zr}&2yLb!OwczAfK-|+-~dr;X4R|}6~FRGu0q3bo;8LoFrqx0W;bT&Un2gCHmfuRs( z@p33o(dbG%leeVcIkn{_GCcpoU32_X;La7t(UaxEgn6RY+Oo3Z?7Fh5>@pC=EUFo# zs-Q76{aK5>|Jh1RcRA+Nh?l_?^!?ixmuZn7=u^6l&?O5hjdx=EisTIc;ZFk=U#~n4Xg$d+Do4 zniN|H!30%!>gGav?;7iZ+pq3nEr#!v6BD@(8X-SEQ5z?#b|b|@`(?M6=bp{6MEIZ` z4R_8c2yNWdLc<=~jkqCa@H_VfA@#ALsgkL~mx0l?<-e<3ibiEB-^OJYsmXe{liBc_RZYn1V|6v2eQSldf z_VvJ_@#)zmqF>E-w!_UOMHYToK;~Y(rn8)`EXi1Jr<-vGJ)P-eb(*VYP*Y7&U2*yK z{A5SR?89i|va51kMEbi=v`OV4B=k*Re zmCBD}qvxzU)c?D8mJ_|+J=(u59y~%1q|~-Zs7zRRi(WALL;hr%nHeprX(vpr*QrfHRjb& zm!Pg&*QMzh2HZO~pPRj;P8m4ryP)9kNJjVJi11LL!*`YJlBPI)u6La2(m5eH)+x$m z)O`fm%t7n^uHE%IoMx6qk9~^*8E*n zYNO*C!twJ@X!*^dzIeO}ay&I)I|a>U3kuIY(Phc-qo{lhniQJ(CJM{&)No39Q&PhG zQ@DGi1PjTDmS0xY+L}%C@LkL8F+(_bq+H~wga^M+=r6e;`Wg_Zol?%*j&$;Bzp$8}rR@r6wCRy6v;M zt88h?hN0!p6M^=XXt9|s%au(K!N%dAMITpW6?E{GPV`N9$N8j${pK}mi7_tdfv5x@W!9L)7fJOVo{&}oJA=um|Z*cwStcxK_ zZfLWsF%CVAa`8@@yso>^aNHFn$L(ZgXklgbu+HaGsmRlsD6uk-1Wj4r+0z!6>u%;} z?g+i+5MsW#0i8zok)rAdWn(MDS9XC!#_d>x_p@o==(FmL$Wf(Kh(=LYe z?xBY5hFmlqaVBoVObY2}u6N(QyaBGKK5aA>4G%ClJ###*;iD+Wl(CE#iyr6+@ElWJpCH*$i}HKn1y}%q{bToPH&A z4|nuaun|QSP7W9$OK3+CJ&}C>gv`m0Z!xkIKN8WzjC@GnTWy^$9?vj5XExdY+uUxP2YyGUxtR$3r1;{Xf!5R#BR93Je#2Pl*O$vFJ;PdXx_?d&G zX0RAx(oPf#K_iqFr$W4>|JmLi1XquyO(F>6d-)Szi6=0}6m1QYqulsnS;&SR)TkS2 
ztEmVf(y%ABYsa}SMlHT=&D-mM?d^OG!{VEtxS1aEi_7@opdYE9$_ZB%zuibFNY||m zO)FfcRMt|{Izr`Vp&i%BuOq=LvxYKJo1>6pDxV_-)`YH^)ZVMY+J=C>zV^GLmOBF_ z*)cW|k-LMsut*T|$+ZJ2?1|s)-Da1_hLy#6`-ODN#qlhmZfR2t7+@mmHPDdzy^3et zvOJu9Jv-8ui?8O~=Waa+dA zI92cXZ>RILe6&BTt6`L^tfIATS$?Wb8PEHmb-?vj6#ZVEM(KyWJ1Mj@U}(q&h~_Z%<<`piED$%J);c za19`6HxC0ri0}$V;~^M7uf9HW=oa8A1f!OY21!PV9ip|69F0rBBnzKJeD;Ocmy@cs z)~{+KJJqoqh?04#hx3zbc3lv*IE z`XH$@uj^mTva}A)*sB8ZNfF-b1B36~72;6Xb=zavRF=D@O3=o@LXU1y{sPPK@R z+zSiH&O$JLu+-_ZfAYf5?*j!~4NA6R3R10bw}3m+kJZ%-%g`*TDzrJCr+x;Crz*lwu4klkcNu0Y8ot-pT%t>NyJbfjy{8pWL0rh^OS$VbRz zR%b&)YnKAk_CVbY^BaHt#^Ulb7*}(T4c6Y)#JwdGV~{WuM*eed%wL^f-Syd!GN!n~ zbn;do#8;-cv|Tig9ljfJ!V+;Z1sP)RNAf?yUOF&qLhFth@2O+Fp5_xkcPPiCi8U0A z;I+0?FEy(0zk)mJZuBZoPuBFPkSN-#5lWW(HVbMDvSVOpPlD0^H)(bJ;n2nN^cADX7EC>HR1;;g(fGa z#>d7}S%@k&k9!}jHfDmZ*O`MFG*i~0SfV^{9^Du1bU}k=Se2Y--R?8VKF61vVJ=T^ zj|bvTD#B7;J_IAkkPY?mopt7#Y#kivvrFb0Ih{XG17_TL$O-RT^#ZGy2ID@W2l{x09PcAX zQsLF|2qoXYXuXXj|2jczO8~K&rPqWV`IqIu(7xZ6F2AAPSJSehu{WHOQpm%W>m3SY zqZz;~thTDL#Px(ZQc1Pfdk-c)ZTR+qHP7@5Jc9;A0@_zToAY%1l4^(BdRj6L+_G@C zH!nP+u5&Svc9rhwKo`TNoRDyHF;hahWySUiK6tyUbjF73HpfVGQn+|EqiIpVj9e!v z)4u${j&KNg*y2r$4jHK<@`F-R^*!5Uc&1mcXa%f^7) zy`(qOea{2A=pOUP7-$4oq2`-1uJAsBMy4HGi-a%`X%7IWEmae9*fl(efX(?>^B@RN z^^$b^+2?xW_R^V#FT4v}qlW&eUB)#=rz_KqQ&Kg3gQ`5U3cDmYl%)`RW|BGH1r&gjR}yKY2$-Lh$Pa_zeJ0>BZQcf0r<99HFhjntGN=s-h~L z1q_k3*k;Bd^JH!SY1-X*l(0%zPvB zG5I4XVf#o0WXHPa_&LH7)_Yb{gGQ%?I6;>U{OJ0s0}o0)yv1wX)k(wE(FC=;>l=3= zZ)!qpFgrIitf-}=rtz}}X-Zw@TNWb2??-1Yt4;#3ogbh5i`HX_anXecwLLttoT?sQ zG3%O#+&rUI+@sveDz=DR5^D7E(hyey$>Bh~_IT4*A;VwnGV`xylCfH$6-QIbfv#Mw ze2S}6pRDY?%Nnu|Y}Kw99I!jw+WZ$YdNwnyk6o(L8|+Mh1a#Y|d&4Y-?t2fMKPcEV z;f{u=X+Jp~Ukfmx&}42wS?Nn6JzSk<09`bK zCo>5oVl9o0cDgc=M9@e@;RB}T##LkQI(nwTtIF>tX6Ee=^-E%kr?zmcBGc?-RTg-?&j+AEj2WArKMg$(_4gox7kz_> z7ZLJ?QqqG4<}SzYHb=e8l40nh6eKuM3-2MX23v2Vt;=J>V_O$f9hCqNH)IKcBJEFY z8QQz8n;N-er@PY%@jRFNxSr!qN@LIi9OleyEH93D1~Gx&S7V*6Wmx`0QS+!XIZ944 z<}G*gmn9DVdu$?Cnkq&nnqF7a3w&axV&)3ujbw7F-uc4a 
zm5oe0BVLgA`LsFfxQ(|{g6^|#x9N>X?oJx#vnK*-dviQwdsfz-cgJ>N8mOHEO6v%0 z1CI5f%Db~ODrorKrQhD`z3o>iNmoI4rv-=Q`|={!y7HDs7vwGgQUDY&IRy<`5F8h& z!oNU6q_6{jiFjSUQIGY%Xf<*#;%)`mBAq>~9?uTZp1?bH^10gyvC7y_VH*@l)%Z;7 ze!7uyCil7mKuTTXUI_`_OXEMC--Ob{RHW-PmnD8u{3f9FYC+_r*XMF^_wIVZhcJW= zA1+*iyej#LdErH}k}}WSN*gnHw>;ty4wed5Q{qEzYU?g4F46-y+xtmF0l*~1-`%s4 zqt;KG9ZeUDph0*pF50yoBCjGupWd5%8aGY}zCMAVOFw73Xm>5zpm=#vhy6+Q2YyAA z`d=hpbBjPaTX7}a)!O?d-^}L%W@cK5;?(TuDo*C_(FCyxWq*3!bT*twVX>SfSiYfQ zSVRegVtQievkI8~-9f=%^T+rA%%pUk&%{$NmOR2OA4WYKSfA2XeZjn$q7 zlC0BU4~gx_)PolB&Tm4*6!8gIvD0_95yW9Fx7XL*`v66wj>!(s-~Ch@+h%;jBNv@( zmezUkT)Q2&lu5&E#q;%!n@@+8J0=GSd%jMLj7&Gy3Km7Y5n{_b|S!iGxZaM0&`^magDT3Ja)gQOBvy-7puPdiLjlam0Fq*O6Z6$mSy zU6pX0(36dF4VqWXR$5MStU}Bu#L5?7)tzc@FfCGB#p5zy7-W)hO9`ZEg3( zNr=okTC`UsqVB^4s)FWUz49{lsZl5IDg_aSKcO$Lh*Qq=XOw!2A#fD2T~18z-yQu+ zOeI#

UC+&)!Cw5V`|j#9U5wSEj{hIEVW!O?;4zT$ zP{-4C1Xyc&mq^%0!q*xr%2+qfzRL|p3=R-Rnuxp-H>tO~xwUnXhRSP=)TOxau5?#g zkXxi!6p#?+dt?z#X1$bzgc`0tuaSVGwV(v5uQzx86q~4hv#mXZPPohfP^GLw2;N=F zy5M2ErElXQT2z>yV_<^a>2ukhOOS0CW)=FfnirZ%6E2v%Ze7m`2)rRghFZH>j+D!w zsl$+Rr*>!;cQWxkCg+KReQ9CVll zsf5U1S&ppfX#XOxKq<+#R}l+sJB9E{r_~5cN$_?icEYm<3qDJIdq72MC>(Da#?uf{ z@dSOccuYC=O9R-s!9x^Ni)2NcSG*YjpEQzT|Bm@iRw+k0K2PBM*8+p&uqPJ+v_85= zkT{cM&q}iL?b(g+x3{7cb|?knmM%j1?3GQKO}m%j?4h}w;_bAFAfXq&?CA5b+ktND zpBILHdi6E<1-y^_@na$AC9DLLo|wGm4-$h$Hd0x2IrS&5-5z{f&W34NZ-RiqoL>D& zX>k*vjvU&bpLECo*Zch;YVHlfS6gi>6s-Re%R;>lmHCSG?wE3CmU&JmPLp!#RGJ(Y zNSfz`0FKBQaRqwiJQbv56)Vdtc|}12S$*EwC1nBN(=rIq5^=j|L0bkVh3LE8aDXk5 zhN;Y3mI$G?ykF!bE8emrL}z>0Z3X$su#Fr#oQl!me77P23DG5$w+Z-)l+Ss6I7$hb zTK)B}l1HaFgem~9({*eS5%zN8)gUA5t3jrLjrFkl&K>qWh1g-n1jcfZG`jv88rV#~ zY#3f!d#lg&6G5@Jttk4S8X2&5kLLC%CP#4CKlU3u;|wKRLA6#PE)5`9YyH!QMn4%I z(g6KC$w)A5c;n+ghj)(1S;z^R%nET~#>KO-z4()+$0wvKpA`G89d@R z*AFx2P>J%nyQ?_UU9dB9`?=7>qkSp!W~u4y%?EeNk534EnBH~#;a`l_(Dx^~;(!BgBF3dOBJDHhzVxVyW%6e(WZp+Iqm z;ts{#-AaMt4#m&<_x`_cpQ~IXd6GHTny-v8-m!k!w9z(<^+YnA*p@gkpqO!qGt;^d z2+v2a+L5M+XAjJWEbge|cpa>VEOQ1Y&_luKaGc{(=1q-xa*iX3*ol~J-f?GED?r-i z;#v{>@7s3dCBXn*+6{BGL8~bnAw`n&AxzTt#hfHD%NbymqM?LYN}JY~d^IdO zE|Msuk7P}=>>VF@!ywlm^x$oqaL!dh`1#6mUltESf@^xW(AO2hDZufLnw)4ya0H5$ zWkq~6TAxhIPvdVG%rLavhw99X!WyGE3SC)nCMHon;1F^&{^rTi^%^joc`utCdHE0| zpmhLayLICLW9uf+1Zi2_5QpIcqYut=b@4C?Ox0?>P3H(Bk(oN9P8z@_>K-6su%DL+ z<{@w3Sybmff3_>&z(C8208wP) zY#peWYZMB`Y87D!g`N4-VB`3`^?MEP+f4Sb3OSQGHk{OQ8diR+I1e;57!lnz{Ad_y zqRcG+x!JDbsxtOZP@dS4z5iZcN8YT|tl}n+4y+uyxVHBLW7HMqNeM9tA*y(OU{G|$ zSV0>R*LonEnkBm#>NKw*6Dwy4FR8XJf_q6#p!$58S+vm@1$>whQ&`O*qIp;kJ%W0k z;TOp4O-jy-K!aMF^}DkM4@uEC_1G|^s|})f;-9}J>Gw>cGp4(p4K2+b`}&&?R2@cU z-f7d*2ErP2ceNE^A)gz0kk6yh$jV_^6kB*p48(GAA6He95a-flAcE0N;WLlQ+)P?b zYfni1x;iWz^u|v!A{TOAsy1L6(_m3S1mM3jejA%*++c%r727{%w^WAj;;pM(6i9Vd{AWHw|9j=;bEI zZerv=;p{!nV@|qWW7;65z~?m-2blX)YN4dD$E~VqXrsOPnFilZ2A@ZR@8Dv?k4JhF;dz4{PH-LV{j@SvLjU${PwCTy=RIyN_6gCe~DS 
zmf6{6czDrhhi#8tZTU#8VN9(&jStOj_J{Y1a9(z!rK>f-L)m>HGyDz})|sCq;F*&;fnsRk;dAHEM`DQUW0lm=ysK+Z8(7Yo?zm-vpH3GYFUCA> z5PHwnIlXdmKc8t0{Tys4D@`veO1<-Xxf@DOCgkfoIe|USk05&=;P%+#^EU3?rE?LZ zwHN88r4{<1aWN)W?$YY@%FFt~W}4B~_VB?VV49Nmot)QiM#+L-@vb0sjX**uR>XZaqk2;F*r|-DGIr91A==C1*{E6yL!?OL|XQ20?ZXaws)Smty?) zdnvI+bP%OCG>Xi1M^P|0*#$|IBS+NTX$@?}~imj`Wd|vzrL)8>kbxx`! z^7F#SXDzEBPiS@qru-zc5}c@2vSckEd9;AOqJjk5`hIp+Zf;u6EE+YVKp=8DrsQHq z1$y7E6H@y|G*{Q^!Iz&qE_p+`^jT7thCc5#Z2)(c1AhkzZHn=u^sLNWJ+LGdJN>us z3!!XVS5m-zq~uHJ@{b-3Pk=-6`Hpl1YldO5HJ#NOziAk2)w!Uqkg? zL&&$jq3QL^|B*duHwn3F3=>(JD7?~eg`2(G(8Y;1vfR4>hlbie{l1N!BO1nR*q#tB zqw_|L04^AWgAB2L=?YAsO4RORiJ2m6tg-IiBVcRfq+9iaDX&pFY%!@LXt1dy2v#rE zx7;x1218}Q-xx)G)A`CQE`%F5-x(Tr>1yiNCX4pd5IVQNNDQO)WZ-W7U&MAmSdD~f z#3`UanJ*LFDvHVD%r0 zbkdEP>~y0uJ`SCYnm#<-NigDgNCR|}o9#&AFia23aExk~Wd^C<`T{x0vs&bQ?X88N zz#3<|Thqm+;*=$T=0)at}i<0a$CrtFbP@2~CW|Nqa0ouC-1s`ip+^Zxp} zd%8WHt~A8J$W0sC6L{t=>+=}NJD+4*^({xnpcXfNz=qXAPPn8tAD*&VIyH$SbruHn z<57#YM#H0Xx;w|W`$)&$C%JLF5kL1x|No+JrTHVv6eJMa$74PTugFV4L0p;J?mdqE z$wAk`j0AgVuQ}}BI7Da(7mp8WfZhNEs;owuo3oqK@jo@q|3-4XT|$$<+GE#N94!dE zoYcoW!ZH-Gi`|6yuv#}_Mi?AOlMUwnzk5zHAv?(_El4ljT6GiwZ#ah20jAU{Vh!5% zpp{B#vQHZOed8RH^>5ka$U7Ntfz=(&{)IwVHoGQO{+W*P$bMLs$NJwz^lg8%$eBM_ zoFKl*Lj3=}NHP)ii!gG;`QJJPBmC-QyQR=CSl#ZsgB}5Mh@;=J!w6>XjYPoDo1iG^ zp~m*?dB`P}BtuZSON-x*EPPk!ChI@HvtRhD1=&!75QOv1Az3<|UE)i2r%e=Q?z4Il zjKr9rfoA0K_kp5lW!SWv#{t!vc^qo4gYMX^(d4#A+CZcBkv4Klm*mdcX*P+{7D?9U z_jdO$W(N(UGViBv9oLf5&F($$}Fx@N07nE8Sv# z`)i>mRUYwx_s!pgK*;wbS_2YcmwA==eI0nu>sKH+kCFI@u74-$& zAD#BY@vC9MZ-T|Xn z4;}t**E4=ywxW#kmcj|n_4UF2)v->mOCfnX4c6K0seSReiRqbdwN_o<`nS7cu-JYs_Z8}RZZd$_Lhs1^%GZ!+~G(hW7K7+!(7MazWbi}cYe|l z9^No{LjpL^=xAi7O*s+ch^ba&|9sa=s?eA<<(Ck%76mIct7(2oOyJ{Duyt;nW=utN z_KohoZqKo0R>D$^O&fFFv*CB-%kx`qZTT0xfkQh=`PRQi`W9HLH;f=`ax>2xLSE0! 
zgS+38o^PsO**M-Ki^G71(41>(Pp|XLVe&TOFJAXGy269c>pFP?HK)}T|eytMx zQX-Cc&7Xes6Z>&NLl-^ng5w=op3|luFz_&N!Z?9RP_l}(VEGgZzV|jmju%9?Xh(?n zh0`fW=Hp7KVYODWhR+6v1!iYojR+PUHhKEl=*Uv*>*!zat>5~n+`hAOZcmJgQ}TJO z5g8i#%_YpxY2fIWb{u(N>pHLn7_`-sIDjepcv<<6=AYY+Mj~;fb5KJGO%Vrw@Ea9@@eO&GDdNL8GWw81EN4TC0&k=NJ7ph8O(3T+dS*MXE@*Ato1_S#4Zw z90MTwkx?(Z&8o#a9!ZnKWPZ%!9XrE(>((z*u_Kr9@lGJI0fMBE*2`ZZMX0d7A+`?g z|J_xd0xCXTN1olaMp^VuzmH^5kzRN zYu*wTl?R%(8>Bdk-uT=uA0o}Oc6IdrjN`4JzXP7v-|E5j&22D&yuBassir+*NqLtv zed-(htgR7c5|-;}-m4`?6U6} zUf3=8zH)>1tkEW;~3XdRe9<2-tqbth42$|fuY|)#IFCh&! zGMo|k=;TW`REQqN-Q1nv06?_(pHSE*hnYFPEh>3VU&lkMCdkS7De8T|Sp z=@2VF`I5Tn+kANm$A1|6p8M&gcU}jROmua?W(zpe^vpx^u5cnH*52Yq{$3(vHTGP@ zD6hZzSn=?nQYq5V@{-|49*0h%m~t@XD#dL9wqycB?3;(*m*Ynsl{A{N3YC0X4&(w# zS((h5_)0Q^7JEYNo|OTnI4CyJ<;R95>+yOwT@Sx}X|`SyXWs_A0C^ z={HJsj2*yUBoh=2cofvB?YV8YekZ%h+s?`_Fn$Z=K`ZcZed>!^G~e5U@vePyd<$}} z=LqoprKGIz9y;ZY`Yl?1d^|I=;g?wu-y7&A|BZ~lxe^bWTq#o|Kt(x~7uxOeQ@2e% zTqbzLlCiPrmukE9IetibBo*l5^Re=RL?eSqm*yRPCl%smt(ajs*;<~Puj*}@T-T*K zf7OyisYYoU2FMZwg1-cpGMkn98Kzl%_$u~C={-*qboA?JN_&--^QVw0@l;!L+W4^6 zB2Jm{9i;gC_>*RZ(l1}>Dd4aff08a&zr)e;UgLE1wD5CtGP3Y#V%%aKog~f90NP>v zF0}~cf4T|Qq!aBxLO5_g_w%o3$s~wYg8Sm9*6i3Mb!u=B6ADZ5j-q}*DElpdYrxNj zmDL?4^v;IgVkjyM?!41=O-iPXfN}g@DiU94!yNb4V(*IM^SIl*r2BicI7I==laLfj zFcie#cQy2)ara(+Y{H;r;{!H|Ss}tL_gtr!XD(Aju5LSGCDHrFX$J$<=e9x2w_GPj z{C9&z=JlVgkch0yqfnVKe>GU;bFG&!Gg_BbHLotu^79_vwchf~q{rhelJ^m?4V7;w>JA=+o6 zl%l~BOoI&0pLtpfyPPR0%jsGtR#R)5Z7FV?*>MjlZt5CNfQR|$VoGkhwHD(ui<%dF zJO&rTeU|$njNN}}B`6C+Mt&Hc&}aWoo<;^Sa7b7-{kVuRERbgJ78l$7T)Su@a}8Q* zebLxhSXrKU96IQbd)IGsa<<=yV-c5+5Rb^hlHG0kpL@~cC~U{SJQ*g9VC)W7lQNHv zwrtKXF3eURwl40DO^#GBjkye6zU#0;f6$loxH|{;{0B*{s}YtvD2bjr|2emu=~2m} zER8Hpzpkv7l5U$BxvHf6j>1#}Uz-P;4%|Bi{iAGAeUdOvAc{cR#E}mNZs#I> z`S;u#m4e#Syy?i^r?AUk6C3^{vP+dmqq0wAcQFgMQ&560!ynYZd7+j+EG#SP*c2Vx zIA)%Jp2T3I9nN4OLHVBw&mrVLfm(#Ws?35_A1OpQwZ4LaM^buIiqKeYO<^Cn%B=o( z$n)r+rO!DpuTL+SkuG&t^&G7}NU2ZSmH9a|{Ass$qw^eRYB;`jgRfsjTL`; 
zb|y?)l*j4m$5wSTxbb73F0ZP?ype)vU`8|Ut~?aZ%BwqPHw5^&O$6^$4;}g9xVVM8 z1>0Q#^I|07grKT8xrz4I2gdx;*o7fZ2|{$wkNfr;kB7gw>vi`C3 zw4PM=WnWj*+gRYa4zFe@jZZrn_U$N?pLILGu12#xa}`f`*)CvxtTZ`47qOwodfvY1 zdnC5ze?WAFLa<7T@NxIgO3Txm@0v$6x%=I7CXd0Yp6c?->}yR2D%;oi zz1JQ_BwVeckg9;v{)&r_hbIBo&7%nmV}fvh4*KcDz8nq!v;;u*&#dz|LG(vp(P=J@ zR`2}C?y53mv{D|!1_=z!5WH^8`gzD2a=X09Q5Hr?%Pal%`A@Q64~oxzo0%u!?;WJM zV#Xx@-u-x!_D=}k)XQOR!wd-?9R@qK*cyz)5L1bP?;mdcWJ158%6Ws?ymGT2IVRD9EF0hQvMJU#hgk1U1OQv?sl&uhnY{s^U|v#j#~*sltie` zR7Nx{b+G(V>0s&Lr>3g5jE}7fj{MoJrnbA>^P}ftSW@752w?O;IHi@v)m@#|igxov z@=Bkpqu9mLjzJ3%inaeKkG;*W=h^%5-&j`0_)R9k5l{y>wJEYthl@lcX zX1K0JE2WJW)LlrgoH~njC7t?-KTp|C-pb7ArQr0Xx_S+k08Y6^1$p+!gY5l(=o?&w zrwyQTolkKz??+|@1W;iaQ(|$epK3s36XQA&ML8cZ0T;TI@2hs%&(u4?gXk2d`Hs~c zHUXk`UPk)<_6i1PAXgM&Vf4a&`{FARi?gBgw5qO(&t7W^snKqXt>!%<=n#>h*LPsu{QYsfTC&h9T72D%WX!!I6) zfuLz9*F$vrHY9{AzJ!TYpt+umE*VDDarTFK!nI5MOCA9*- zSvPF%k4MHmxnfCBs7D>DDMCcCX91{c2nSL!eB4h*TH6hTSL5D?z^R$0C)p^uZ!>foR}7w7ih2w_JUcjFC*+ zX+2*-5R|_iX}8f=-qedaawNOyWrN(pTQPksR+itNXE)uow!9 zba`rU;o99xQdDsFL;>gyO&l5fohF|<$I}wg=3G6nkQxD<(kBSTCwS_hegt4`NGxqRMH-M&ZYInXWp09V;MxH?6?$0GG>=ruGOtRgVAnp}80AyhzyzIaJFG zef_lDWU@f_LkGs2jKPyRHy~0tc?*csKs z;kYm;D4zDlW|EeXac6xed<6CK)Z-fHsMtZM^_+JLi>5`zfW61z-?_074a5Wrrm^sq z2gJJtHEQ^YO)n7FNoM87*nja=uVv4i8@=~1uq7rH#Jmhe6-W-o0ta-tN+;fIw%e!Op-yL!(_*4@aZq&U@?HcjEd518IPO>Mxh#%W%;i0n>AC7Dq5U zw-Le8kvp7p-H#(4;yTNbb-AuMj+ESV<&4xebFQ+YLF|md18pBdF76dy8d!+-K~bBZ z8$1Q&r+L?bKO;YXPB+VdFTB>ey^km(pc#vz>3(P_3m6>GU&!c`f85#<(qjm(+SVg! 
z`o|G?QX;R}K);a3yJ^Oc+=oVPB_74t5XGI0z4u0*B?mJ-as;P*EhX1=={dN&rucZ* zW@fA3{y0gFt_vDwedB$#6m*y$EPJP2wQi@C>=Vr%PP07P(3Q9Hv{Abog-nS^=l;|} z6bDBEYQ5jklCEjw*Voy6@I3_x;d~1ID*)(o5VRDJ6w~nhORem?zZ8_MwwtTl;&gk+ zoHYdlpFq>$e%+n^+2?}&a#V4m?BcIVy7d|K&rN zt|YO#70W{#eXTWEaWD=a9fF?lxOD1XX+3yobu z3!{~P9A`hdRwc6?z>O~Gnq~flhwoW|wr_%q%7VbHB(bO-+CZU8&sd(ICjx#0kTwW5 z&moj>03@4}85l0RaGXpEwO&S|ZL_XYBavd0gk{s(`3$nNsbRQcA~)$CgEK+B`CjQG zO`z@W1kS68Ay_ExiCq-%cy>P<(iHWw(pUFZ_tti@S65Z@Qg!PGWSTN+*bL<`LT?}( zdP4SxhYvv3Qe;^-wLBpd2P5ZyI~dq}II^{SoCUa3e)Y{d3#3{q7;6>{oC-&sjF$cu?h7Aq-K`V`OAy23j$a-1dtSQew>? z?StrgU7pp2c@@o#-)kt|wzU=bSbg2Q98sA(|8q8J5Hm1OSZ<8B*!52>oBVVEf9?Vg zm7>@CA`xYjG6?v)$~D=Rv4q1?a)}dBDy6`f#5Y?T{`!PN5$Rfs+Fdfr0#~n)E(*rg zLX<_YV!A(a9ja6Nifr=$bky%}^5!dz4{LYX*-};Ch_jPK!NE}%8<%=r<@wn$6M~c9 zR?+x~NKrs+L_BdYfmpzMQ3nM4q;>R>1lY~ZSCLf12_=W#y0bjk=aVKQ zds?{tFL~21og#l+O%Z)@#SyBHHpBfh)ux-gh2l6&xxwFA&BDfO1%|I zx1Kab)~z&=>JOmicg!SW?A%WGdwL*B6`U%+e^Q=}Cmjr0-c1S0b^?4ulJeyue%S(n zS)QdFBqS@^Re~Ydm?J?AQa?M37Hub*LQAVmL#FsgqIHt`0}d1%l2VA7|80|zsca25 zF*pRPdiCJC87m{FVlqUj`zRTnGDQlsE^j}k$F`(Ij|kibPam>RbX zy+3mlV)I<$DF`7YyN~G1Q97-gHhex^hU8Z87Eq(UZazXQDW^2l4*JC7hYxdeiL4*1 zX7sBJyD=+5R&Rr#b;ozj?0LW$TOd+_LZ{Q{SxDj1J|ZUkxFksHCF^nIDms!d*X}MI zQl>Wd+2n+?C@vmOz=_Pb^voXN4v2ZnWv*ea?KIcq(N35L5}}b*N&y$D)?cYo??8e9 zqed}%i4U4A(QTeSkM53z#D7=LQgP*Me(#yT{{2Y_>ISedAdP|}jl*Ap3!U54{i<7( zX#U&*G>oFW0;+ViPYsUuS0+saBc^!EBl0Bp_$tPv4uZF=v2t?NNmE7cVP7ks7Q1_K z_M-|K?7Co~LIQ5K4th7^jj@=lx_wD25rbcMQ#%O~Fp%j&u*7j_Mc68+jWS&jw8Vn- z`UqJ=6)LpJS^m9{#OHFbX+|Ry2qA{iCvO$x80rHW+XW21W>5J?pOO*^t;{KDsQYf8 z_Wgl^r8s79gxMx3rcDj$V?wfQ%us-}K&j(IdI}~9kgJsjd9gG)STyX-9ZenKd2x#9WJaTayj#m4n7jC{s5>i}{>L4r z{Mh&+9==*CPGgfBthoNA6e8q%VPWWwQv#GxP!R&3VZ@b`tm`*Ll#$6Z2vMAyQ`@yL zN0V2G86SgD=5UA`H~9~K}Ez=R^*0}F&L$< zl{Hs98>=K38J64ZV_|eZx7{?MmlH^~r5Z2b+wC1H%fOhB+iqe1!c{6W6MygBJuER@QiZy_keUW?Nx%MmRMO|Fxn{F`7M^wrO=&3Azpb`h7UP zCE(@HF`mK;1jcC@JBKjretgl5*~{kJ0phGHRT`WW3w|LMREXEh73f@UB;#Ipud1b? 
zaO$zAzb~*WQq&`=mmO(HV)o%b_qQzDRc4Qsg1}l#`uaNQr&kg?hfppQmGt9-{3ZWG z^k4r?hQ5o-ogfLY*>@y(k#8Zqq!{oNd0dPFhVKkuD8UwZy{%QNG05EHmy6fJgwzWK zuVt8}_uE8QL^|uA^Q9Y5pM$xe6{hGg&5qR@^Bl#SAXoUfbyh>%cG7PH;k zP*KF)T<6u$Vw2+QPjusOo*-oqTC_NzE1j#2W6>qg%pRQ_7xeQq$e8rGIPHtLkS-be z$q_0JjqiyV?&Ck))p3s ze~jYM_d9DSz@K@F^FcDU?%i=VEiOK(s&oZ(S+CQnKS!t=T=P1TCujFCDJg%RIXxd7 z4GOjRy3mm@FthmXObHfK3H>=lpPUdWPGmz7%d2$wW@7lgHve~rYIRnZTfhgLk>OE3 zH$z4Bj8}U}jmh)0>;_`0?*qn*WR1_mM}w0jwqX!gG&I@ZDP4p5rYdhrtd5)JL@ZbY ztCLmq5by9ofLj1mQUR`q$yGgX5Wi(ih0)Os9~uv3n)Eju@_WpkQ~)^R0w1qn10+fW zWbM8{?qDzT$9sQkC}Zd_aC6_8KILzrg7`E#5HsuRIOFNTkkFkADH>}T%V@<@yLaQG z2Sg;<`kM6B^m=1w{qP}F>~7BXf-Noio=pt}dF|gjD*#EIfXn@9-*h_PfM&*`YD#Wf zan+%|&t(5mQ@rJfGrg>ig0)##WmraJv~V|DG*|9mH-U)j_fnQJI+D6Ip%2qMT){N) zM8CH-^u}-0@+@sBu>^k9!$b`Cj-r7?0{eDm1^NnbK=cs>CfM1Z`;Y)Vg5PzZYD!zU zBq3-wxv&_6tN8KbqV|g9Kye_|DtC!$&i?LM|Ey2)KWe7`*PX8CqBaYV6l@6FmJ{lc3L2j&-_{i`&~8arq%CpfN~Ziw z4&#LZM&deV%HZBmA-?>+bd$z$S5KKwO+9R7kxigSVHWTs*K3~u>oNVY7Q74aJL+6K z`LLvUM;`FdXii{XT;SsP+uY`ne-NV?XK(~YDnE@_?)7qDJuTxerjS2ljub@*upjJf zPd)dH?sdz>>fX_OIDBu@UfQaw;o&-8Z&>|-=Nh^E2zD$6ki#R7Z$Ah=MN@DOjfdOq z-g}|r(&F~tyEm>+R;|h0J#O&+@F?)EsiQ&gB}=VyBQpW#O`fQTF_;~gy96hAc2&ya zg)Cwm-w&5dZVI-NV^aY_ZUpE-!KpuV5k)99AAU@4RhvVAj|ysWwpPi^cn_Gay{!!? 
z8$tmc|LTy`IBn4EcP{ZmtFZ@2omTpMJ#O5XWP%C79J^kSZ z_`3m`!Pk?_>Se3^Yz#A?flnOe2^)Ny*ZzKF#hD6#)l#=g|XI%jsT+q+W-xHm=SKz{cFps|}7} zgYEAA@?f3??3@ciOE-63u16|`e4X*`+i?PWvzUrSOW#dYGc#(pW~m&y^Hn$fc%d?!7GJx_B10tsiyQw&e*p3rp8Q#a_xTa^!-uR+{qB4&4x?L<8 zN)Ql}0trz)*U0so7Z&*qQ(IU@CN5jYc03-o^o>HdETS0_TyOU}a(av`)1%l;a`jsn zWQ|3=4h%Z|-r1wiA1Jx@HP0?Q_s_2)_S|Ojb`9EfxX5moGe;PA?_V@$DYi@gQzxHs zK<3U?C)}ck>((@t^M_`LTuM%I7^I=H9s%??FyN88_Tr*mir@Z{n3P-x^+NzNr!K8O z(Y^HZ{AUs=>kwPjJLZXTDTg!ijEr>oy^Q>^6ZeJG)IWf?2!9Fd3fcHAVoG-GKx9J2 z^^d^S4oH;3*hJGU20j1(l}aOekMEhE`5iX28-iBeYYjY$Fxw|WW+q~R95#EIMNfYb%s4H!14Z>M1Wa|hV*tIYxo zZ+vO`cErKYe4~Jfx~H|L3d6gpKR>UJA21T9^AsC(_X4<2sy7~cVn^y;FEcTV>54u6 z)AA$zvNpDKZf$6bn=aHKjv6&1h#Z~QR8B}w!E$_A0OEA_5>W-jDx{ zR!hYzd~A3Bb=p58wntzpO_GsDL`2GS7d%rJ+@59lp<(S{aSfG{OHhEH?S1VNLX+*W z8!qiVVGmbzst6=j47g6(K1d)VB^-i0mel&Z_NDIMPMhDVjts@&Ir?~+nUxIG+aVE+ zx7S^N#_ayNZBugrz2xJC`fPWZvq<^tp;DmXJdqii<^u7SYlXFmp&>!hxn`X;)bch0 z`KwvY3|Nj~szOpnTt5-=WjO!~fnIuyZOjIjFEoCk$|rGZ%>E4rYb`=t5sWgt>_!eG zLv+UV3R{|Ebv0lHokDiBHA>rM^U^DWz20TGY`Ne7L@ca4mh0A+1uon}q36&{jaW(f zWe%ssF(_KNipvOPYanu_>v`)+5|cdBBZ;;?KyFMl*|~%sk+^TPt(ABoE{`A`ED9^<|>?}4$;mGq;7&PQGm!^uWtYm((jiC$U9oFJGa)m@bZ zHogx&EL#~;%#KEl1DwPtZ>GJWT%_NxkqtOM zxp_S&mK#x;|hJ2$E z4Ns7p+yO96B^>#6anDDq&hS9JK<{Byr9O!<(RoSdGK9FvJS6k`%ZjCKV}P@f`#`|oX=2p)Gc!P#8^hm>E{7Ok5&mQ{KKr&)k&`oc~VLp5|PCx|G-7 z{WyOY{Lf0W_tW9e?{Ji}FvVZ)udWa`dcT)SscbGaCx`y^iDVF#F%t1X^V1IXq!Ur%{f({(plT3C2Eg2&y5_{$34@{NHo}Z@Fjbv+O@8%6~rV zrT>u!1`WnDxMaxzNfB!k2=}JUE7%)jw)c8kMww>|w*RnSBom_(E-pdl(J;59Z+fC< z@CDwOA{)pM)|B_!`|?}%(3|NrJS|*YKJtn_>U~KbI7G(Q5_!y!Uv((gxe`)5#n17L zxPr%L>Nwq2*#9R6pM9K^2Sw#C<5K=!eSySK;D`k&Nig_1dE{jv5(HupB2yVg+b5kO zLnXkoKoAY4(f;+cJBfCqN0Hu2Xd=PzZ4Bp|x;P`4=TqMB9nLN7o-|fqb0chu-@S7a z7?wu(4Xq8G!yL0ow9+}3JqT^b^s#crk`p@SBP1gG@0PAk)T0<$GH?~o-Y>*I>!)7b z?N_se^eI7drZY_4qns$V`28%&-=7z2IFuoyQ(Z%2K7lFA=nfCd6iAo+9-S7Gb?)Lt zy(l@-YDwMZ-3t53npCmG*VzO+yls4qQKV;NoNQR3=4^fdxh0XI7*C^&L*{;?*Rl%R zZ=tvC0P)e6Z%}(YEO!t%^_-f%I!4-SF+ouPb8rTWbS>_U|V16l~ 
zGx|*hFUc~Y6`{XSo~b$I$M*tl1DSz@1Ye-`3hJ$kJD-v?so3iwIXUUvZJJGBu!szO zA3S*I>ci_tKAE~=BLY{hv>ILq#56hfy<*oPzy$if^+rW#k}>HGHkP7g*sh2*UzYvC z$p}ovz3+JVaEodgZx+B(ShrqG8f2aQA=Y#!#7`aXfh{KU>sji+_0XJrchN+hA6uyC3;b(MggM1NsS;6!N5mVOQw0 z=H2@Nq2}vBeYKr(M*eDlo2W=lFd6bUcwzXTt6Af$Q1`d5!~f8d^7;O|C5gFn*#qTb1C7y^O9Ko9(LjkW+5cKROT z*w)=@pSqZ$Vu<9DFE6TwKZsb#&Wc#k8$j?0y`o*jCQ-fuw3;s#ep{Od!2I7o}}>dM6#9r#(sjd?|M9Ro}Bu!oCEXQ z9pQPy6AB0BcpOIvsqBCg=%e=Q+d)=CKtu%N;o<^fn2Ut9tetUr^4yLux(Ys2{#~z= zlUa`Z*?t8(8e|^@XFeGuWQzV7IE$`WBnqa_;rp@J?P+NY_{FCs%-L8HM&6L#%*Wu; zVzzpZqF&-{ezCRa`6pLoIiOC`1ky?;&^ir+MhRpsy=CNW?sbTy5}sk=M)NEYV}I^w z%Y`O1cX9*bMKkkEtn_=$T$Ksr;H@9Vt~*Y+Y3okr>liY_buAD+6S7X2RTM}qQnPjQhqsO>}xM&4# zeFR#KXcpyp5_$%T0>u@YJ}c9Khjx`*qj9y|@BN0f8R4q`xW6ePjvPI3YcrMpgbmP+ zBW(J+MQvYbq$8P#(k#mmL9&XdOE$d3C`Z2zv32r2VFEankK%4PDlZ^ZXN0smcV)H+^$unp0DRU?H-QOkEDKVvd>F=?A z+{gaZRELK~3FM0|_sj!CVQ?f>0u6Zk>H2yXi8;Cdp6|fQzWObmDlni^`rG-myqulw z<qk8M`8l=0C1>S9RJ6Om@53Hn@}&do6; zrEu3Yn0>Ep{oMf9RxN1RvAE#(aNZeqX&g6dX84i2>Aa`^NR+xD^Ji+2E~7*`O$3l` zHk6e!PVe3&RG5&&_5*rpp)PM5N)oLGo3M+8i@w5pzQ(Oo+5-uj$I6t(WTXK>?c~CK z2hxo;I@+|L2pW`U6YPLy#~2lBRPo1k8GVJ@iw^nw^d7EUBomH$NbBr^+sU?x4yVqZ zhj}6Y#?p}0tF+Xf-TPFHlw)$R$&ic+ZUpTYGa(PYBuy?1W*t8#BN1u;+pV9UCeH~V z@R96#$0KV~xbDZl%c->V57mXiP)JOF>FyO1gA7$X2&`?*_kc;BTZoJ(nIbA7z_VKk}v{NwaUYCWmS z2AGk<04+je)(RsnFbY~6dsbSBCT^nlxi8)`pMudvQQk~hVL`~x-QLmD*A13tWi zzMf-tW<$uu*g|~y&-K{QT!n1BjZD-Cs&n`QQXZ2~zYZMZ%VUh_qr&MrH!+IP@geHq zFa1DY*f@dGij^mMGCP&O`hv-o4L8k(|l}M&Y7-#X>nqyYV3iCfZcL6 zFArl`nPH*!!VPJIn$Sllnk_dE%s4j;H2#W%r|WqWV2HFJYSGT7o<@5400st`m_Y#E zsF#h-e6Rabfu+eb1<562k{SgHj2`TZ8eXVL0fQ0$kEyRo~&grf`sHPW}<%N~~aV8F5 zlQ_7Acti*iOq7fv;$Qaa(m3d10up6J**|FDB!6@nop-_FLAO&@;b`57_6Td3LwaCr z@Vnm^3PI7sI5+CF(BzRWgG`=tKj*uQ3M1r-3P#8c7k%QHgWU-^2@z?l zd&qJP4Mx1m+h#AAg+0zhdmnPUA@D6}ei{-t^fwwRK6;72Y(nk}=ED#P?aOPPp}+QA z&@|`(VJAoe^z@_dMpd_H0%b!va^0D#Ci-+)S zYws(e4igwC;`5*8ubY$B7oX=KX10jOBB_b8058Y6p6Bd@;Ql}%Uuz$aDef|I`;Dg^ z92ui^sME+1n|mB^7+qu7)TXGTSXA_|_w6n9zDLY|ne_eKiAi0*ss;-P4&m3yE4fJBov5Y+Ax*3n(cLQ 
zX?4*zGn)q(X6?P$?DnNAJc6FyR$CX%@XjzjRe7Jw*R{Fh0)Ac_B8JLQMTxV&(_rx`C9+hH>Iy0a(y`W8!9x&PPHl1>%yYjl7=v0|zDf;hpa@g+BaS!d$>i zXTYOThEas7;v)z+&u91Qk7dbPsEIi{+%Mgpf?CPHk;hPB4kgrXL|V{755(u#GRX|n z1BDJHqL>McuOOBeLf=o?YJD%dr}?;Aw~IWJOyGQhb-1*Zx|7Lvx*i6m^v=f*8=1S_ zdwV6k%D&#-Q%p|{DVe-`eCJXvc)AVMM&;saxKirXCNRjDl69@qzxiJDT@fGt!nOhA zWYe^!9KR$JJmm8=PG|M&J&1Iokfr@#^meH!(_zf=J}p_#a?6krmmjCjGwhZajQwz2 zHNJy`6OEubIGY~!xb^rrSJz!wJTgeRSKlp@P-k3!JNuc5O}F)WIj^q4N-|`ez9d7( z9i=g}dzR%#2uBNMZEX>JBR>U)!Xb$XnIaXhi~>8QrV%CrWgYZ}L7=07h@kM>Ync(@ z@lm+b5=iawNz`#3dPs2`&_4*xp zu}e4B)|Z(-svjj?y#)9gk|m4G3*G8vDheD3oVzT>G(k5@G~%v*Y|EzGc)j%p=g0tqgn+r)L7lEKZxpCFK-$fDQq&6+Q=gT{TSVFY`uvH?2N zy8JnLvL)o+R|~EM&GS3xd%UrFA1=DiCf~>UTQc8w zusO4pd*1)<5B=O2E}AZvvZmi~#(rrIE3^Y^=eFm^+3$w(^6wuqbh`huHvd_o%bvgX zhj4z%jN_)GOOl)jF+m4VZ^dlcGGq#}SeM~lK_SO|L@d`@deZkSWHd7=R4R;;P4FTj zd0IXzc2kA9gH%{lc)xSUcN-FL-bL#2l4y$SE$uquAj@lIW>WSQJ+H9&J--oYjgWw( zA|{&`xEO;GP`$kb#pJP-g*8rvxa&HvW|a)PU*C&7G@NuuE!4=d8yS*$3SRUO*R}p?lGjsbV9=sU zudV&4Ql{QVSneu!q03V0mfH^0(r|~=tU^Os^w2+v7-9S?u#~!bOp1yE{88L-Fq=I1T_O?E$hV`VnP$w1d`TKhm<39OtuUL3OGSw0DxdrtGcuKv z(EAt*>gNkD^$6yEgomNQ=`@qFfkIVg> zd0KVUTUojsRjBJ}^Au_P6?eO$$T~tu{DDZpqOtFrI%;#$IXM|^kq!+>%b6R$;S$pP zacf2Wc`Ese8veV$hv+9jbR#FDh8AXT+?_vH&C%4{hyVEUR>oI`qK~A8lsr z+R7?~@Ac*}#RhYWEdkyq`GX?{pc>n98RKr>^efIldZMbTTE(zabGj4Wsp;pZgnvGA zCp9m6_)AItnkXtt-<4LIup|rTVQ}55h^-JGoMe-I)u5eCn*)A6RqQNlHGGSK#)87Ad@hn6|i`r9=EziAHr}sHRfv4Hi9Wof22&<~q701=$ zt+iTmqOFo3nWWyH57Z@tOeU^N#PnRx{GeA+o*@2Tmgnihf%Ov1QkT~PrV+UK!x8AZ zOXULS)EN*otK;e6)arGD#0!mPhu-uOqU0;Acxk98@rs#Fa7N6MlkCe}`9}A2J~67L z6e&(sFf*BTGVa%OZ%en1Y zvp&wh%BinFG1{KT-cJARZ;ynC|CQF}Z$HErO*5(p%gBgy^*sx<2+=5;Yx}4$yQ!>H zm((M4D>FK{N~WEH_M82T=aqu4+v5q}_f8Wl# zggBM9eCD@M-IXO(L$v@8tF}NdHzzmdrQn|JWZ-+!ybcb`lC;lwSF}p;%ek(PnwC?} zI2DS~vljmkanTV6UqU=Z0cM_%?>jzB`ucw@>VF;bqD7|B2V2dJ@1fUMc>)BrPRonG z57en62z`%|Wtc?>-VEMS1l?rY*yw#YR`zs@)tPrSAd7BdageiYT=x$RO1&0MkuT+H z#@mAqeU+@n8s=u;pg*CX&Ds(`VMyH3>G(a+&CXgwV)bu^1QLb>Gk?XT;CsQu@WJmx 
zV5#OX=TZ_R+~8V`Nr~TivL5fz|7yBDf9j0Bb=PDpN0#nL`&7~O!VgIbOfmk;Vg#&xoqVc79?uV-+?VoFRwu_fO{$iymqu=XGrbI@!D z@)#=fDo~y_mw$~l)CaDRW2AhPaqRwLwmN(8K=#;=GyT+fMfK3Il}ehA+4+nJHR6$Q z*K##HJ4#^1kAK`3*f^$*B{Y-R*$LhYerAas5q2J#gi5-JT|ZBb@;brcvbs*gA+?1< z7U^lbV`K0B1me^lR8+%>-3HSF`fqj{q2-Vc29>bW$vxBp?0$+X@nS8!!vfbSpsVx1 z%Twiw-DT~a?Mg=TCt9v78Kb3i*-}-{UCxricmG|w3kB8&03B>U|MYE3fpDYu-L*hn z=k*nVF)3-mPw{&#VxAhq4gP$^Gd>rRYolhr7vkWkSx5w1$bMf7txL-gAvG+|CLj0Z z#U#f3{IOhjy5nW-fz&{uKyCN6Z^iB9=r{Y-)!L=mmbMa4r$^BjIpo>T$#Nq?T6$uGpfDTcQIGyHV5ZC zGcCAk+&MxImABFGW}XI~3yTZ(LU4mJ0v#D_HjDo4bg@z}VvtKK##G9)B&Fwv%lg&2 z9&mzf-DCYp{t-F+6sX$Jd!f`_wCQDwW3S_Pi`=*s4LF$;#OCNAxZ&Q?*y=bGQ_=&# zSVVa1n8Jux+}mW-B4ewns?_YrZn4l!IQYD%sl;qNz`WgN)Y{wa>heaw-!`Jy*Z5yp z+79g-!t2wkXFu{5@!xCC9=~SdskYW%6XX=eLW$J3se)Q#{9xqa(^FlaHw#2_;Ku7lz_8%$FSd?WbUSgSwMYl)|VCOW>Uo<3*9m+t+ht=-g^zDE60+I zhK9NuBA=dz|KPM5yJp#^Z|D!}Fk>eyaZR$yEwz{>|NOQr4ed95$T=YXo=ONx>&-^}RA zeZWvr(!J5;^_te=Pt=KbuM>!6lkN_D)9L_pP|)$YdvojPSIO{dhhC;{RBLLJG7IfYv*e}5#DF@ z49QKe+X{&h<;Mo?&DQCjXK5+j)0jKkfV(%}=SUeo%SrdoP>*p867QQwhFy~Aw1Wli z&$UJk-zaVED__9fo`*>MY47{KMr%G7ko%n4Ig04vQ&k!`oL$qBcY5dScq5PUN%WCs zI<5yCeO<+W9W*n3TY1?3)B0xI|72*@?+u;sQVG|4bMAl16q~^<=3yUjc~#^wom|*w z$0A4n*k-5RitgZ)&j*|i7b&3p@O?|ecn3>pnwW(6fZsADs^bL-pz0Ly#H>#QKh2r1 zXn9X!eL+N+jfLyq(JErD`vXUAl-y+_0~=YGoxkN_>8e{sh2-M6-FH>%UGsu`1t_tp zx9!5j@_=Z5$Q`ZQbvpM9GOUu{v7IuhwP%rgLtUl}xlIRssH`Hkt(v}lpRTGGRpGWD zZ$|$d5ixy~M_q(tIaTWRUjO5QimZrn5WVr5e?!?r%w3f&)A)oLhst2(;cy zh@&^Dv`!N7adP{;+0>tvqiw$RWDiV2V5;6(w z%a#*e-tdv7N+}oIj~`jNgM`F{M5*%5!@QRA6|!`_ic}_GO_e%(l=U!gc`%bG>lBBckxiNDJ@AKI#mQW1$Y}w)m)XN+G!Rx-1qzHHK5*0rFotF z9N+4pc8_(h=1VQ13Nq|`ELa@Fob3kOJV$TuT)JfFv5{2VeAJKqzA}!wQ9kNW@j>p6 z1Xmr=oIR`_Nf_Xw%*N(>?o-X8cl8;T?f)NNGhy$d7u5+If=&D_kq^|yf4O_Pz|g;k zfcHbAH9I6vNF`HHNs#OWjPNIR8;ls?#Di$RYB#H+x&_&8%YIXPV#H?s74=Nb7fGaS zGJa?;JRh<4*<;?jix6qqIFvBZ*=1PNNSx?06tIs)r4 zi>}&W$LTbBD(-KU?&IAKKJJvJuAUX4ks4@&7|!aY5w~aj6p-Ig!w_60sT3(oM7|w0 z62sX2cVfMa8=YWInTQx(+5RRp+AUlgzaFj+Lbn}Wn=dAJ7R@^pD%s#Li4Md9E|^0B 
z2l&1nO|#}&^pYl{vIJ_`9{4O1TB=75$6$&iZSudXD15r;k1k|aa^y` z(L+j$v>CI;Mm_JCCx}&DP~y_rJ83$)ZgVy8RxxqwBEBPXnvMP}?!Q?sg>$ZA`g`yj z(IP6#p!Twu`eX1}fe^1c!4PYf#)sVQ-CIJMAIf;ZeC7%v$WLo+4*2n$;L%&drmcDH zjKoYLl9E*7FfSQ*A$E`lPt!La}twQ-_ilFX5MUG{I)Vl+pklD?a(1yMPGej&| zMh?!I+Wsf1KbL`Tr*H;U{q0FHrpYtJBg$_#4}o#~2HN;kU(-?N9|cX_IY+bzR8aA? z1SQ50#&5JgJa-*vjaZGxZGrgE^Cw#RQjN8=u4H<_6}cx3?Lxuo6<(jN*G^t~Uk)|I z%r?e06rM)#T~X#;&lNc|2(G$NYX1}yxvn~GG#dW=pgG6Ze+qLD=%|t)a&g9J|J$2D zPybT)zY-LB!~;GWb{{&!fnlTddID2RLfPz>)fD+NzS*~d4Y<_qFZ+9WCqF|D<(eT& zV^24F(I3rMNiAPO_W2t{gbFme6;_6s*U+g<4tdWve$Wdt94xa*;i80hydYY-#rjWQ z^t}uL{+Y&pb^7r;{2hP(UkLZgp7dCb6U#90Lj4EwbX&K?dJgmy4DlF z+QMble}8ysv{SPSP*8PIH=%;-KN!pU^iSIVzpFDbuu4%#t4c1stT)^S|4+gA9|ec? z>o)AtN27oL2$~`yzjbz2|HFR&BQ{)6#{vF@5hz1I3JH%CB^IVInR$Bh2X0rZo;>HC z3>@W%y8h-OxM=j}`e*EoPC^)PFtyWjLGO2eh@3HC3vdix9D>=sO;#k=-+FE3`ycDT zqYe^rD!(Bk`q;go=pR)v7$a1;aRYM`ObxVFB3^rSaX=t$^P{DtViAvlqk-u_wzT_R zBXxZ$hk7t>6qo+bkmwO;Bgk9tNoj-L;g14QE~-+HU>}9n&-gCC zQuhfdu1{IO^~Huk=5QK8c;FAPkM0xQAFNT-KhF|=LE7GwdkZl|@P8=?^Au9|@L~Q0 z>-D?|69hTbJD)bG`RBPqztBko0#!dEcNybtz$w0V(37R)?)ewqu){dT{k+KgdG$90 zum|9Ad5SeKivN{tH=xMS>Oo;)oBWn@DLlMKxNSYK6UGrloW(EuDE>!HNwz9W9uF@K zEzBAIJK~Yt*mV2gzhiCZZL^?kiPFzhgBoA0gSl1b5{LK(oGlf#uJaIx;|?qlbMjw& zl;L7-#1F^?cQm*k=zg=?)g}`()o!Iky3L>SpPnWl60W-#?m>hU2BSyx-#QpIrNkG%aW3A2ZSz5+ziQR7rj`0B8P#@^M@d2^G zt3kJMyAX<})ENJS2_$CEmT<9!Dka7B-}7Gvpvy>)?EUtddDF8w5FA>U+ght zl;e5wPvj#G3XCD3ZHdO%nH|Hh_OdnBH<`aT;LenNb z%YwMvX*Cp*#$giRNVHBV$2NbL6(UmEkj~bjUZ8Jp`cP@ONn>1|7}Q!E>!)03+QD$P zQ4Bkn9n-M_sXxQsV)s-oCF0uBGke@K2Gz(uIJYNP@RI_-9j2Q}etsS0u>f%4C1D?d zkaP4#(xfay&;jlXaBSpS0WECKUvYhC4=CtNxCLzM%C2Ax8VrLlJ{jey@(%`K_Q|w$ zMO56$lJx})k*S|b3Ase{e)8(htI)sez=yeoJJmkjq!lR!NdfQx#<*mMS`Je3aDM74 zthYxlfWRZYohKBy(2KviNJ*QX6P@4mYS2{Q9c2~FuQWfROgxPs zQ1lh}6ot;*ydPX589_vZkZ8mv^{_sn4DqUepN%bG!&T9*b|wmv@hZ5)r}o#sHe$(F z!fpB|KV3SQ1e0yu$@ccmmnfDX6ZO%5t8C1(d6g0gyG7WJ$9{|76@P)F1_YY5^KdRm zElDlC`$@8evHdET8MqZqE^jo`SB5eH=%EpWeZ0v3$SV>UJOQz3XJhm@HE_?7$39H` 
zPz<5!QsEeaNjNE99D=5%3DTJL13eTW*YAPn2iGcLpb$6B`hXzfwywkp{WdTu==kG@ zueL--m}Gm`@53``NEWVLh%+Z;doNH_d?Bh6HMVVFl1zFOkwiCo4J2gZq!uc}_}iOW z_v%A201UIlbIRrUo=emv`xoRbt^wbmOQ^#FLAN2o7ZiYV5F7GEW*a(no@O=Dcip;M zj8frKz2HF3VscUVPPK?#*lKB;Nf85Cm?hbyR!Vj+oY|3ZkPIP$4*I3i5G+hwBka1! z^xg$Z9pce(rb4kZAre_eo>Xd%8dop~v9yKDJ>zohAzk|wOpl3@@At@kdqB};YaK5cSkR$RZ#?f^6!{ zJ{+;+UFfLR?GIdcq{TujF9cgKw^(nmnJTh9L-iT}cR(OrSErj&Db&A6e2EM0(Lc}s zS!)b3juYzR+QNAU8hAn5CY?93{cVheaH%lxLi+kfK>7l`u4YDhCCshY2*_?1b;&(~ z0bYZ?*UPu(6#9T_okG&{Fztvm9=xQtVH`+h-BzBWzphu7uKou?KNOALvh}UrBRvtd z-?Vm8&yE?sugONhZR|Ke8ue@GWx!Cg(c(!*YJ~{aMp-*JVYlp7pnvq8;%m%h z!bxnvH`QaQ&v0wwX*nXD@Q+fgfvFRxB*8PBPW}pSLMq5-vhN-8T|EE^u=B^IGG!>Q ztI}Ryff7}BJJxQ73CmS$YJ;XBfafb`%&_zJ`5!pmpjEt4#cL89Bmca9X9*)% zr-S`nhG3Moc*Je~gg7h|FbTku4Hn=)x$Mk;?7%=GMUsV!!+z_;2(Fb`$#_siTyZ4a zdJsxRc&r<+l|+RDm+WQ0fRpJHY=znZ4{R_7Utj)x@3_k^q~!*oP103L zjEvW~QpYr4IF(zhQe+i|`#_=X!$+rll`C4(;@2;%1**(Wy!7s*`v-V0W-3m_o8ts{ z81ol4#l~MN3ocs?PEdv-?m8k+3bal|_Sbv+n~4`Hy3AHCVtl4J2EGiaz3$ubWzNR< zOB!c zeC&tm^V5(xsR&T_u|>bWxWzm7G75v~kZtxj%f!$X9CxDPDz!81eFJZqc#J*6@Y?tH zJ16hQ=z{1_7~*m8XJnE z_yKX?G7f~5FAxpyXA75B{H@m6OI|%ck7L>V`5KENEpkBy$xR1t6Jmf^y^4!BpGMtD zB5I^Ukj!mvc;!&!9sa_JN}0UM(cyU}F7b5dI!c91>~GX!`5JI&f2*rC3|O^nNbgC= z8Rj0FOw$feBfW#tK1sJDzCyY*faa;TOTR6Mr<;U;iE$6l!0UI}%am3a-{U-Eb3rg% zOLj%P;yY|%{U*a=(zi~$&a4R*#k@<&FP&il;47V;Xa}c_!LU}qyh@Q+xOM4CTFT1y9&R zwP~3p0yR8$wRDH{#n=yis;%x-uH}C>>>wPpLAFUfrtP-LoF!}i#i=Zv$c}EO(cqkR z@WV?kK3qFhuYr;gFG3|nF>(3E$h_VeJF}*OGLH214HCR^W1Vad%#jf*QJ+K#q*>Wd z$a42su!)Kz2u_Nqi#D2a!TLp@`B2^al>uhzxIwO2DmKLL&rTs#xU`R2v6;VRfz>?7 zG)gZqSy^TBh%}1w-9h)vL=qix@5e3*f)6pA0Y7|Fc__NS8Ot)IAY6WZTk^h-{F**h zr0Y0ECKAZ=!^MA95#Xk>$%%uzHE`5^|NoEN6g}GMrfKn&s zDFQX~Q;Yov*-ajoIn&{CY5m0OC1j^a-aZwUC|vOb@V$mJKBw<#xozpcxo-O#0f&9$ z>VfY0l}JBhDu{KXImLnpo_b6loETupfXr7J3P-F{BS4L0Hn)2@YJj^eAE^+U6stys zL~Pcg?{|sLyAi^9o$N09Pdp^9xBxw?^0v#j*TtLwKJW*dl^1ki&Nw; z3w(3#p zx+HSMID<9;ClYAjUoxH);&$@=OZp7xImuLnzKhQPuJF(lkxmQ74y3{pP`Oi}^lJGp zCK`r+LFS`pwy74R^#ekiH!MhddUs+|N>T9F$V&SZ%ZRP3 
zMzOngskZ2s**`A`WuISy?UPueY_m|QUqk6J4qPolm;R(n_4EVK!NXy_j_o`gcLW{i z3z_bmmj{Lud9AW{U^i^}pgr$D>VCtfg(-_?$$IXq#PcZvQCf&+$G?y9I|&j+!Jb$~ zs58YHHubVM1d1#Hs7`t!syf=ABIIkTzA=c{tG!=MVZCaPV@6j7pkuR2SI*sa>j`kPR%cbq@SiA(VpD?G2*zkE=)#dl8HC$LouY5mFhw zw(jG?H`1}b>KJzKt({aHDsxoBqz>u`(4jbum3Tmyom7-^JFD$4I`97*$ z453zzJI|G&@tgC-zvi0rC5ZxfomyTX&bzQ(#VS>qg9f8g;=z1%^lR4s);tp?BaKtx zGvfEp=ZHYrNi3`GOm?NV(2B1?jZf|sd@Boxu@rlDr{u^M#jiL^ zYb95dMsHS|ajmJm#rO05uoow}8_ zz<;!e2v1_JRf@cuGB5VuS0{1FWp2Y4>m4li(hpER1rEMkaqDpo7EN}LhE~VYZk^x5 z0&Y|Uox?6$+q%*jK1bF$lEGHo$C*xlY(Y3m3>jzTNf#rvRWF*-eKZAM#KdE5 zOwNDq*sGSXf&&*}*b7NM7Go%AyH>l^PG&(S|KjSxKNgxI;dZ8082{zO$8}wFo^m#v zc>ZC4f3d_ABUHNdp}SW4%mp101n`TSknXMx;oV*7+$giy_9(U4$6zSyg4gnH*Evko z^6I~pd4Hlk_Tbu{%pl_k#NEmaZ_Ta%A$uMnkq|T)1~-E^+*vgGuSpGGDoKufN@oG| z&5!69M~k|9bsz6tDhhzOs}(xNYS8|ba%r0xjN`WawZ-W1&nwYFS{EOVoQFD)FHDYE zxAC~Qg-csF77889h+1I)YlW@1?As@EBLCK3=oi@dgs_ut`}8?)O!!KA;FHi4k-qt9 zE-m}^PP|yk^tck zRuJ-X&h47yQCzf-{Y#q{<9IkW4?lj>*gFyNAHxZ>ZHKJ8T$tFfeTn!PT+!^a*+c&n z+Ck?b7V=g?pP_#~65g6$ao`ra^PvWhDamN|gc1Hhp5=@z8O)`)J~7X^%2!zcy9;>x z)jS-7z8Z1Mv2D>LX(!4I}e0STMqE{eOmwpz%QXBe_L#a3b!tDLR{DmB$ zEmMK`DALDrL%yfuUvz7eWmW}Pxk8Jym zMMe#dvt58)8_-nM`Z$B$JFv%C`ZkHPIqv!TZX?j39KjI(ipdeLgg152FJ8Spq#!fXD+y2=T#(!tU<*|ipmI~zFN?8jHoK_UN z4SzJ&D(g7{zjh=;!&|yuA*uFHr8;)Pk&8A&;<{1lofn&?xyu=G-5;z!@|JF{HDT$j znOcu#$mbn(^1||u0vW#VH8}iR-C)6vAfJzc#PINSbzdt#nDm-XPwRL5{--=+2cwHY zH%CG+a=;tLQOrZSz2imwV%+=C60~?HjQXiXh_|~kwI(sU%1w1k+>JmFsft3+Ze6&Q zQdWI|a+$W?P22L_EW-ed{iWRk>0|jGv%qq-jTTo+xffzK?JjDmbW+A~`8rIE^<0UK zJRYJxAT}vG8PR>hz%s#|<$DrgK_`mqmzJbj4tyDYIbOjBk4dA`INyqIsKrwJk%D4g zh$iL<)@)IK6~E?^r3-b^=wB-1%wj`d>`2{)7Uevwb+&Tl-2L8kq-Vf#| zBB=Po#FQSv|6)LkM%_CoR^ysiyE~Qk$tz#T&lhy9>;M`%2l!3RrFVt@PyX@^@ho~b z8@DK+IsQ(^1#HI*$h=G)crn|)!$7oIpv>(==S=@_k5T59@#!vi zuX*1XbUL<)&${6;X{CBQ#Wao9?f6fsM;z?T@`N7I>WYVrIOyE@Jt)}z?ZA4hvmfLs z#tpW67qB`loVXuI@6EG@_&?07xxok*?KOl?XyX-4cX;AbLD>49@P|UQMRVi-7psM> zx_*#twJL8#DFA5Vs{fY=5eLIB$#D8$yvWv#;S?_`{|!WTLfDN|*riduf6EdRc7u;M 
zxG|jnkl6o7=&zwzeg8g8zsaMh3H;CPHHv)4+2;TKQ?2%c-B&i*DeRO9+3q?T zcB-QSS33kM_R^b4x!&O?muaFKdi2>jdgE#T`>~2Cv;Pfy>Cs=-5FJg)e3Y{22eKQ zGTa(dn^a_BMGKx2_l>pw+NR+{{So}n)JMcl`8J?7ZP#GNceK!I=uWsVfj@ksWQeS@ zA%)Bfl^OXqKIqRfw=TxK7PXH>M03~K$G@}j%wmag zGQQ`(E!EHM2?^~{%nup=?#&TKBae;e?ELwkUGF_ zmd%Cfc^Gu_zdHKjZ}>bO52wF~0R(zc!uTc*TcU0CJ?N_?zx*OSBLjp02bHMp63nE z6!cxjKFJ~g?LUwzY_$e9{N4KV0e8P{@}J0Ri~3C;G&VO|uKAnZ#t=R%h`%^Bs3`&U znlakxp1_->EL3d843Tyio=dbMF=Q}!qY?uRUG)3Y-lZlZKao`Eb z-}F#2THAW-cQpzzS(+DuL+gQoRq~x%5aKcNZs`!eyN#`F+|GD5;2u#(u#6LSIjOi~ z((p}86X|Jm|MaEKki%!>&hTMv>2q?!J$a?XL}v5b z^YpsMSr&`!ut?XBoz=wJ+j(;vc7~|oT$Q!u4TA#T^X6P#&X6Cv+uwINRekpPQrM>>MG>rX)T%r;5h0cxmDgJFrn~!* z2#cuUyc!=q zbh}uyEe+v!ueZW?I6(*_+i5$TUNJB$Cb`x>11+#35KSezE#B-}a?OBa#+}S7tdP@2 zSRXjzrtuq#&aIzgGPf)KTylqB2Rlk`4vC+zUgY~P4zU{-Q zqSdgh#jlw3ANDmk-J$orBAw>tY#1%AmX2l${x&aoa6pIS&A>Jf7rk(1nz|)k0ATvU zdgI0iC#-wkYFAgknVS|GW;{GJoytJ*Q8Q#lYHo)QRuvO@hcUw7(^ZjeX6v`>X9{r^zKIAvTDh+}`75>D*mZT%_pqbjBLA$+zKt2e zdG|SM+{=O4y5P8ycSGx34r<$mk(kD3n~~T$Pd`!Q08wqKsyj8e3}?ebE2G;LH){BT znGDTDtk`$|$u!pTZL9)|y53DcP@zQFn7qSpdG=~3e(XLWrsu68ok@CCZ)8WU)so_V zIC;fw{Mth0i&F3Y3`OozH(|1Q@Y*wz0E{d<^%rvRa@I)qHe(y(T$+ z&Wj`B(J|{zWRv_ga(7V)E*PV_YFr+nDkFYb9CbpZivCK}H$GJbWswJ3GHWb`npBHkfC)@&H{3*4RjXX*NNis|B`MCCg#~SIHp@#l!znDP;rC>y8|WH6 z3KyCTHQRoBBQy_Ns+Ql7F8&NYE-};EYr3%VE9aRqOK|g2^7lE5;Q)p2uiXCKmwZ@u z`6{<}T{`@0c3+Y+)HiXykOfODyo;qO(i95;nOHl{Bah)>uM@UuVP)^fHfEQ15uE@T zwliqrdM}nfqnXB)mD%`{nr;qJ4l;G-%`b$1Aus}gs}6~G|5wp@oC9kQr&^oK*piHc zuxqthTP;6Me0dd5q9)LsX(CTc0Xw&9f;)~KvF^g&97{zG7vKN+{Jy4&zBt4VgCx`5 zia0)O%iQzyMTPlI|8vlIMDuIwa5?+sUb4^)SE$PJHdrOtY32=w)^AI=XA z{$#rtIFWR@>_qJhOuQVEZ?@AM)KlORAlr$N_ta}Jl6-gUW^K4qK<&`W6caJ&yiW zLsI;5rN43+may3};HlwH!Rnv(Vvd}#C%xy&$+WQn*SgO_H{+%iJWOqtrdStm`YP|^ zbaqf)<~*At#d}f`!?v<(;6!|U#%y0c{7HEd^$A?5Ax1jyu4wF1R4P`L$8WIE#g!iJ zgn&TY@hV|S)&&k*uK7q*TbR|c0E`wOTp`}IpySjx4|T$r_Sg?8N6fiLY!Z+VYPhb~ zj=6M;slzrc!y{IEIk$pZNqyLU=m$6&aX+O)$0Q4MH|lnI%$^LD_~BvE-spD)-{ zw-|0|j*TkP>ViKpaLYbDQ{&&foypizt3B+H^Lr2Og^qNZ20|w9CY8pJ94?lAn%>5T 
z-tl|kv7Co9-;D=n4~|p~&uO!cm{B^eS0)p)5M^ufP>?ZWUTwW;W}rnP);t`-p~r|DcA_VFJ6dIHV9Z=PTUOO&l;97wh~^@ zN_N)Om8~`6SXoI!UY?zFZo>6q*53C&y-%IcYp5fwUTd!7$lH0>oTXoPzTvzOv~xX@ zZy0i)_LepaEFWAc^TKi&#>U7mf}y_wKiT5w5_Z6IBpT66YyJXoG-?9_jbpvs+tW(^)= z_j|5~CdgUqVq%;mY+`+HZSGP-J4C}X&qdJd@7^#DX*xsXh`7s8%9WPF2o+_P+)B9s z;p<7FHd1HHYfNKRCSCr(0tudK#xf_YNNF#Jj zVMKK`x+DEhse7nCFwv{QO~6=+u;Fme-+g4oR%cWr%qKuS>_$jb%>)fE-YWr#dWZ5a zjjP{aN8!X`qR^GR#HV5h2Qu`?;x~z_JAdZCE{N1SUqCldH-vIm(7Q4u8Hwq0hYm=~ zl+`b@l@~PB!4?7(?;~b%6&)H}K5&HXG_$Q7WNT~)3S{<1XPc3iZ^y0woCjm?s`^G3 zG$uhIjov2#n%#@WG)PRY@=uTl(qN}feVW>u+*v470m5c>XBO1N(R-fR9v-n?5GAbn zC;{!YxIeh2$Px4H8N5x(0O{26cYm24I|XD0Or!x1_X$Js?K1g}{C?FD;k&0{yH9`mo4QK5{cqAO8!-eQcN_d) zh?`}WGCN238EtLn_%e|uO~x#s1vRVf6qyj-%hV61^*99aJ2y?**+sVX)qv&a{&mO2 zcZt%dR~<1wnTrz&5`-Q{$NMN(mJNH#DVv=wT%64+dwGmz;)1E?v5))9G9L13!+wI? zi^j2Uk2?9anUw_@!0rh4k$U>{wVlOvjyirf$?4seJNzQ}&>H7sxJc^{%aL@UO~}~z zcV2usd}zeE+!3i?&dezot&+2l3IQCy8vLL|l!Wv<>3)PCI=SrNsQ&_hP$6t3I zRwG8JLL_KTQIzv<>vVW&23tO;=;fPlHI!!jngrgF2onWbt4ws@{m1|q1Bw5k?GdrV zX#nZs82OP|hSFkolIt*4^okz-)abV0RV79eg_YC<@i56*(-xUHFadIO(;1|4#=YHM zuAe?MJbgfN;NIBU9+B*^l9@WeUE9G(NDY`C?5D$l$*zCCyxCUF;Xa5bAy9iPS3H!S zGmO!cDbi~DC+^c9v>=n$@#JjzhneVWf0<+UT?F^CnG8wZPb4tPeX6T?PiA0@VxwK*3p>W%z$-TTN_I5?0H zEZQgH;F(n`{TQjPKDG}AKZpb(GX7GQ@GfhkXhk5)H^>UMnqGNLg};v2^jT0HGv5X)zph(KgMi~r;zBL$D+TITN5P^ehCX`VmV?c zj}rKu!ARXlg^QgmSeH~2-?w+pbF!)eobuMkWJi=R`+_Xz{l;;F%3NsrW%^EMYLCYG zWlu=Sy*=C0_4#fAKaSe^*L7z#3X!}oTiG=dd@p{#siiKlk<*0SqP)@U2%$L^)Xc%4<58JQ<0))X(rf6HW zNN|6k$!Dw>H%@seNaF)%DxRb&{uLc*da96<9|w2x&wdM&7oRu)NumJ8dtFXb->fv` zaU~GQ+y%p+CBSL@x_->e{kGnulKC%3*nWo_(35XR9d=&(YkQE!y7LjoF72Y0$!u2_ zL7Hsu52sq;zJ7r5#Uw2$t+Y5N*-eTy31M`|cd$o=9Ve6n7jYUTM zzTo6|ZXPG&M$FSSB`l5sw8v7j!(+4o8Gp+jJo{`iCb!vn?S8$>{Sh72x-qG1=h=bb z>JNclFuU;@amjBan2$7j#xfwYoJ9=71`lwgO6%=?#*y{w0ul&$8#eBL0@%TH8qzmt zSKa+#n2W&~yJThUD2Fr-ias_h*1Ze@_=y*kb%ej z{mE3C)VxlL07mLK0a44IE%H?9|6%H_!`kSc_hCw*MO$2pySrOUaV_pv+$Fdc3KXZf zLxBRt-Q7J%8YmK63PFkl2>Q$CdfM;%=HicRvg~H(oHKLJJ#%JAWy0+=Y-lgXFspkn 
zlPneftUAUIc3ZO_a(;MlaA5U_dPHt^$x5FREx|)~&Y75eqyCYV4udf6p)k1wkjob} zx@zxN*ApenVo-0R0Z$*}bNZlkhaRh5ecwozu zl)h2YF4l+^%~z~8J%1BoR5Q)UL0(D#$RHp1r~FtNDbbpO24$$&#XXht#CIjA@U>(LBuZfE_D7GSOT6SoDJwFJ8Vw1OqO8&Y}W}ve0e^ z&a^xK~li#`Rv6MD|&@I6q`(y3)P5>uz>pZ%Y*CPvrz z)4BKN@wlLgaG5%SZmkv)B247cezQM=&ZiDGHEv;u(dhnRx=)GHQN6U}yS~iM+ z)5DTtvmm?a-@a(=wBtlv+4`%gxzKU6XNJc4zbk%2+1OGuC0~!Fv8TXL|6EPZv@|pJ z-hoPr2Q7%wCoE}{{VBim72&Ms12IM;N~WVL1+gi`HCp!H(%~y964f(=NqNkRKQN__ zyH9HRI6*6-#MXlXWjX0))5B62w1E*$3D5axX8-B6!Qqc5f(ovamxfFTzmp?&_O+?Q z5^RDNZG3!4dwjxDEt>ADwyrS%oaLPH>QuxyA28%)B!By*lgiIu#L|Oi`K|I&;apvj zjEGb-18r8AYk^C z5S%v5?cx4jI2Xk<%DOGG@qN#5QTEg2Qlo)nQo|OH5RtpQJZ*%NZJibUQ3qZ!OF#=` z|1(#Gvj{I9=>Zq0(NEbB&u7ie-yU!k6aSDX)fhMWeZMZAP5xwcK}Zjmy_}jT(p4NI z$b@hzec$W)_n5=zW^@x+v%sL8-tSxYIqW?)iROf5eWsR4*7Oc_RtqE5bDB6NXBG>0 ztqHFsbstW53ZOKl|Ezw7+>@3vYX5w$ra{s~Oyr@lSNru;B{oGKAv=BApkqbKS!N3G ztti`C{}+<~lkCd`5!655-(Mcz2%3`4(0p$E%wxksdH!8mbIfUJIaeEWZr_aTd2>!! zjPQT?xCf~Gg6&zY1s2O(y1JfJ%$9nDpgi=%AMxI;yQLy3n34wVfo-0S*xi(GX(Pp7 zs5lbHB@B+@Urj7r7DZ~`)swmS^(?2^$wEpyv_-co^Q1e!#Uh0ctZn`t(ZNSVHs1*A z&mgx`YI83@%HAG`&OAeV7{q0<04JZ7Nt<#qk(3=ki%4~>(|rmasaHQk6pdsjUt=)*RJ6b1C4n_B(<3q!PV z@8S2glt{PuL-!R*Ian+d0YwkK%@b&5RSI2)hxrVuEb7wkC(M2%wPTRZV!Ah)uO-_9 z?Wi%(UQ2WzWt@t6U&rTlzfXF6#MDFUDJ>jis_?(PTQLMJn#{KXd>YRgL22)P40GUr zE>3ci<1Vz)eRJ@m!xQprPvCvP>xKCELr<;og<7atJ+lNq=c|H>d56P~3ItezEdj5= zru7zo8ob`$C4zHBbxKSYQs$C0wDHV4RXq#YUR^QraWIC5mQA963+CWQTW?5vi8q zSz`sCQn4!`J=*SzeFMa9dcY@=ozvB zf57tK%TA)QeF(UOG@p$$BKL4YN|Z;#tUTDgdI$KR>2zB3hJxefQfYU_H1A=&694J= zx)ZB&^$axg+(;EiUwrHK(gR$(PIfmwelU^z^I@|{TkcOlvxKv9NkrDuWv9uca^6yR zXpwv;kxwgq$ivm_ZqK>Cw8IajTyc_bsb(Eo`BAL;{^FR^H;j(1i`YQH*Lz{R6L6_F zxJjtUJzO=qkC)(sCs^6%jJpZB(uZud^NXhC^Px~k?4Z_TZ^60cT#d1+J|jy|VEL8n zI%)9sFJl9ZQr9}>EW98}XCFXQ3+L-fBZ(rieBt1t1Bmx8Hi~ zmsTxf^U73?;cZS+OraU1hJn#dMp6A7)EC3Fn%L7yp3~_b4Iy{>)qZc4_&Jw(U+EKB z3rj22DJ&86-a@q5m>U)&Txtq087*V3pN>(hB`1v&7W&--9DZ+tDRh#dg4#9rT?HNL zVBqh!9>}ajiA7w4V#wYAzFtQYqw6x>T{&Jblj1$rR576=EGgF}(@QT6hAnXK>k}-M 
zWjGMM=cIRIZ!n()Tv(hPWbB0m`xiBFi&44zG>5-Fe)Wa+a;w^-s|y1OAL!*#-P={n z;%}#}|Ek$}{ zP`Yyy=fT(7_iUc)zj?$p!T9W#qEQw(TJs9{biL>#_aoNV{dp_%uK zf!rlRgs`lx?}gmOQ(({DR8m$!e)jvaJv6y-tlJySv`ZEa4h<0P)-6r`d5xZQ03na${TOd&Qrmb0n3ETP1gJQ$*}->~sfiFW&= z@6&pWEEEO5ARcF>w<%vlFv#3z=TCkVh->hz-fgUwcXjEgd!7Y-mZg^8)4AWn+#39( zrG!pG8CkX>GP3NPU-fOK*u@`{>djvGoD**>vmE35j?iWDWD^cWU=n-rxX9)cDYM+q zw*(AY=Tbql=i6E-5RS98mNq`)Slmr@+e+6ZGYhwTa;mtb(P06I{`HA46{S1O&4Mz9 z8Lf~{{&P4x70U43PVV`f9+(dx$zGQRUnx_KyYlvw<$ZN*aM@n1=I-*)*&tKN(kh7+ z6IXMU!NJ*4-RS`Z+-<`BNB)dI^cNK5yZ1*h|H5C2$}GZ8=Ya>3YF7IO;lh-Q+Af zBe1Z%t&3mBB;SK=WFVQ%>S_DRfdSP)oJD4kW)%97^DP~U?i!o==a4=JpVO=&(d`ZLXSMRD2J z^|IZ&O9kmTrtfLE-@^C(E{npPlBA&Sq@-z_!>L(*KCGNZEbh?jAh91$sXK0 z4UuSPumX+FdvZbp1B;FZehv&LmwpKm$X_ns32 zp#i7M!PR(6SUaaqk#S9(kXKAx&M$x3-NZ^!5hHz;#urkM(nA!UWXI<-&NgdJrP?)Sd&{vxm(Vj18pDzHMfaMrc9#3N zX|XVKxO!V(^ut4*)ywy_uh^tZnOIr(x5Gk1(sdsGlftLGB@3jM*yjLX#tgVQ#6*$t zYIXURFJQ46e{b^Tg-4T3@`5R}W8BnWKOcpTkFBXW^0ik4$Uvoqb8JdDT{iep4(`;~ zDcU4_Z6M*-<(KdNaSfkqaMC(O@D*^`)Ct_IdRix|1s9a(cJ<&{Kat7XZT-x3?XsQI z3~3Zn3zY=_kc7E8-}gBIR&2vsDbqOCBGQ7Lg&xUYSakALE(UkxyBYl*utn=m*4dRh zy}>Q#U`U&|-*(8>1$L4pBuG#kQuCrJ1!2n1f;JV95si2;N#!X~VXuj%fVaA~?W!v+ zh(20c0QD}XMn}QpVO^6IMn-45L+@k1(pbH2ij{;bt$<~Xpu^2u!-f-3>*hgCNH$GS zMJ(d>-wp}?yZ!oCiWeIp9C0+?NqoMT%erRLdxXCd9xpqeLe zZXIcW{k{V*c+y3T=IyVOH$(SqEK)xexc>kMBR-nY!cgh zz50Ul>hHJfnN)H}dkRU`y)&PWx}wo-;v$OOQ|etC2uUnmcHN%udT`05nj??$|9y}? zq0Kw0Pu0KdNmK<0Lmoydvugn1!rRfFx2J1&^hE0BN=;@X zm;g)~fy$5XK4D?~y)D=BwG@&OnrRF}&04?@Xk-y{z)Civ?yOKi9G)Da1=8ikV#2Aj zD-DVmW+4;u#na{45z^#wacPa~d=_4ey zY;T)y)fa*U@7Ts6qSrglTbt(*-azAEk*=#4D_RkLaYrxc1V!7{FSoZ-td~n*GrMrww4{lZ^g?7 zH?$Y?d5X^^1hymVg?(b#EsZ9TPpt|TVMmimZnN-R#aLn-@^JBbX<6y$Xcoz5(>oC< zB@qplR&{i+KXR%u(~o&Px#0Q?dL}bFyA#KbVZx#MQglU1(kJ=@2B7cn@B6cL#Eb+u zf%(Ur2ll%SLEQzkQ@P|UQvmOVGyqybJ&SyebtJj34M;P2-r-Cr#fnH{CjHp7QYLS? 
z9=Z)&D-t)O%|n+j8p}uaLgpxH>S^hqo?(iX)&-@1jLw3~vT4*#*5m>}?5wQpTvN8W z>Mx0tqqXzb_uX0MWLQ(CYm8kP97d|1>ZdIkTWU3|+N&Dgf-c|s9-I{%F}6#bB!>m+ zfc7(J)mqja34)zl|7e!ycI3m2iE^& zuYNi8?3dc-y_3=oDhjbcTer57(vI)cQkCT);vpjI`$4;JYZdJ2lkWG`f$?f8_=-D> z0nlHC9Wf0dH-8%YH{EF!r-0$P07UWV!MlLvoV8vzfi4n8-d!|z$PZ>Vzq^~wju_r| z-%#*@?Ji_|7lk1=8y8lr5$NOAN+PY|C`*CA6sh2p<4LeNJ!<8uF(y*(ZB_VZKH|usbso%ooIUPOw;LWH*06 z<GXZIb_)U`;uCx4Suabsk7Vd!CIwmJF1y6F6t%Uiy+XW?cAmV&j=`;+ zrCm)mT|XI=$F(QyKM{(OhHU=|U3`6tDZ9Pz3LDirNbfi-==EUFS8BI5NdInom_%rW zuEDp&RReLTVhw@*>Te&OopPGRD9QG|FL}5VTX7L5=?Xr@C8}0|Uf(S{R8|M}7$EOO zX-N;xWr78UAPq+vsaB#C3B&F5WCmV(j@yqW)p{oq?C1Mlg5Y*5be?@RoKJt%(niED z3nZId%ed2cwLq@}(d6$fw-u!m6R$Z@AK#;$9tmf! zX1L#)9eXItyIeZp3e+Lc<$Cw=Ws)6x>&svKP__w&3FQ}va<`igGgDPCPb=PVcSU=5 z@jN*aK&K=T=rU%a;l0Umz11O=nHCsE;TZ!+V6eAg{BzQ- z{B$K>+YQ(nTkpPhXH2(S2cI8?u0^-QrSf~N$X*c2YJpf|V}oJ0EE?YyJ~(-^86>+% zkdeJ1eV?k3{7s>#;-JID9OQW??n)_vlQ1mR?)i{WTab#Pkwo9L6!dQ<1e$mUW8sZ3 z6U5`o?uE;2D;mcSUe!j{ly{t@GbrNl##x5SeLVY8oFeZp>MevQ*&Ix+kwuq)TG-h< zXI16uMzG{gw$wj&3Uf=q0)?S56uq??5HZ8zGO+m769-5@J8}8}3+P@vk;u-?Ei`?- z6?Jf%(Ry;#B-m+XZ_8Jn%TqNLlo;v2s-#Y}dN&c8&-UN9^CbCc=Svej5Ag%(Mdr{mSP@$~+c+#z|x%mU6(yA;M z?w0w8`rk8tBHr@@J{#$<;11x_6HNr*dK&ct9`*(6UDtg|~D5z^yk?duda z#%nocZ_A(GHt+L<&X&%@7}n>RCv3}X@!j6vPj=YxrGE`1$_ph*1NLs)wk; zBwL}IzpMoFsl)o5Gv^Q5;fONTxY8jWTVqd;8K?IJy@C5&Yk}33-B~zO8D_8C)zItr zWBh1S{EYW7EJ0S|Dv=)>w(lJU-s8+?9i8^xuol}l?=rMfV(0ZPCCc(-1XGz!7FLpE~3|r!BwgxJt>CuvW!3_O_*NI zSL<@_nO4;~)-ZQDFzp%2fN&+LfvP-OugAC6=9)t@ps6ti2SVkpA%!}0Bu!Ts5v9EN zCwDL3O#~qz8%3ko#V360;i{*nHCU$SqLW6nn3}#u`6j=>tgmQHxB6k&JF7A@=`6G# zxBr-vYs}TLO01!*xUAWam-`6}UKuLk9#I*=E9V~>s~$^Cq1#W@%wPw!z&2%|)iZT! 
z5r<*YPi9?LynDarZai%P4~SNkJ$G=_IF_viR22~Pu=0P+Ej2=2I$W{*v9O1Fn_qk>wnm;>+QM>lH674(yXSH zuYXHxb9;ABX?IULU9hdWucg{L-@meOXz8P8xw!KAR1Dwu*+d z62`g6K8mG_o(2&!$HWXDoG7J_guFBh?A1%JSoAj6(3CA8mLg7rwJYdeK~`gvYR0T* zKIBR2emF4^c9j2$bM2^C-D>INm6~mHeLFRS6$lV1vQ#M02Mnjsv~mMc~u?BV7b09c>> zT_SefT$!tNiBqxqRh+t(H1N$@$ZvC3 zEfZBo%OHy$DiP3FmI}ZUa_~8Tn0D^Er>mlzOe3|td`Z1#^)f)hwLqAi1!K{R&Db)I zWq5$-=|c>nVzEO2YTKo`XP^y@ww~$jg_e9J+r-61U~Ny}@7WWRIp3|lK^mEZQoixO z3k#`-69I_RWqA;-lcL+pI;}+zZ`ccLcDy?8O<&Vg3hjH)&T5OhyLuKCeP_x3^Os z1nG9>D7C-s&hP0@8t)qj1-8HlbDZPc>6I3#uu=&LPfqO~z*$Ur4g}TwtsCY;Gc)LVJ{9q@=jQx$jZ7y zU{2K+L^w2P+bhop3+>pG6{wYPM{HfMI;}*KCcc4!Sg4VQ6wTFar&^b-J zum4Obzibs#N243$UYEE7LpJN@!vYoYP?Y37V(XLWV~AbW5eUuai(GwbCD6KdT~MmS zGsuNU20hKB#)&;NV%q1)MZiuUXODO= zjl_#`b^HSBc{y3KwG|aXI6uXKs0RzM$ShR{*H%GSm$p)iJtk|%RZFZ)`l>a7Et5aDZ*F4sOAwKMZdX=pL!Jh zE2TSG#)|E>#_Zj+NNK9C1TcRsk3sijb!gjLF0u_01z-;R3t5oX<0xL3ccLvUgC{FP zOw_ZmUG&ioj_!n3j?%}W=U-lmGRGG<^1N`7ha^;wg)~BJedvD1fQkIZ1 z#ZuZ_Z)ViBDj4GLU+LO#!1;ep5Z4%{!1YQVu@p)o;0WzB ztBEprCjIY!oVWy$_T3UZA^se9pH_+U6|kRYG2(?2=QRDB{DEa%8w|6r;-YEk650SU z^RdAK8birL8BoK2XCSR>BgxsNY$uqKMD_f;upsTzGWNd%q#pG3S*$;;{mUJ*+=~C0 z0x5Q>Z%9*%d7YU1^g%%$Woi~jf%SFMt(cQ~MZ0_j1@{st^#y1A(B=9Ifl)1i=N93; zS*i#BUGtMGqPh+eBZ?NQFcanqZXANJ^6{PjEOT8Cc4S6mP+Z>)#d^5EZ z8Ao-jYDx~4``sj>c}_{@*J?6LlNk~aHk8<+lTD9vFEUic#>)emygjLRcx4?1ir6Rr z3n}`1`u@jfoQ-gkS5&Tv`;T#B+0Jw2={)&9YVu|cq~6aqycRmTS3FqceF^lUi2dm2 zrps;Q$B z%9Cw`Re6AgC)n=6aLY>R<7e?Mf3etM`CgNsEWTO{!*N%sbx})0^ZS9EIggc9b2Zst z;oeBXL+^d0MoBP{E#jhzJ{V}HYBX-WhNW(_jC7nAg~@MgVHTFaNMQ!Q*4ar$Utdxl zoM>B(Z)-BsL|T;cE+s9sBN~Jo>SI}IY~+gRrKb}->^d@?2btEcFrXvLix)e@CjA=j z323EyCE_}tO}ueGbv78Vk`v@2G$Nb?ynG!W>J{`ft61iIT3uG1V51>qjj+H?!8RLkW+#Md*n-Mt=nH#WwQzfwXgSKIF zqixO6?hr}s&x`?Im@AuJ5$v^d+e~RYjy4M`q)?7Yx$aY};Qo28X;OX*KwXve>5|J|f z^1ZTFXvF5h(s*B&OSac;iOF=lflfg-=Eg2!JDzm-1yzCY`C-2)g!}YW;xuPu%*8{1 zA=zq-IIl=2-g0$%PE}vQbtNCuYp)hterHcXZhjFys)RPe2tcr_=a&0t5k2~YT40yQ zw_*;2aIq62Ezc(Aw-BhQ!(I)DiW6G&`@eAyEI9UjkqH2jcwAYLMK%sE`2XU^wS)?K 
zd)TV)5%I)|;rlyoRl+aS~z;+92U^QEGgfowkBP7(_>U#3lKnQ26owGKW z_(&NW^ivhYPini$e;nU=1wH=uULU8YE%>g{u@$+HAzufV50^2bWH{t(Vv-`>tCr&E zq3l{}u;F1ugG}G}HnH~sbR;dDqGC`$8XiSkAUqpzJG-X$>S~p$AS%50*Q*`l#T+td zg166z=D~Vkpo^z37LTcp*@Iqffq!`=z)EyB#Pw4f{m4vD2CM+_Ez@Y)DKodI}3|QNzL6562mQMVMVv z+l#p$t|^{1$~Ii-!AH~2+uRd3VgJTIpvBior{kQq8G$SPRd2?|DC zv)R*C+z0h3v@-4@vTE=(F8AP8iu0ZboSJDss61b4Z;(tMmPF{-$eOepRwS+zU2#ya zzZwJeUdtdDKR_$_>5@pzf|tg?rVaQ`I+AU#)8tHP+LLO@&qv-AjEB0F%rlU=Q38)( zyQcIO1g8-aqHzv$V?$j?n3Bh3_+wC>DbprZk7P!=KiWL?T&2e(4>SyfDKKw!aocuv zmdsU;A@B$V1qHDZ|?NuLtnpGhC;I|Sf%TKQT4R__F|Ee@cPLyxgEL@u+J4i$0LI#d$V7b zKm>ny@;KQ#GX5G>#20^nQnek>>A#ZA1%vbk?@J~zxTtG)5bo-qo+OOi4u}`fqZ{a$ zDGhyLw4GT$nyknMf7%^!oBr+ODESj?s-Zn5TwtPqzjIg}8FL;)!C~uqDM3cMp2^XA zlo6zs{GC}!!jFF)3jci$tBUx-(`~nwUvryN~kE z=`1TN-->`B$@JZT`;x8uuJxecejR;T zkqDix16DtRH1-y!=|>YHy4y)f-qWTj8QEuDtQw!SP=%%0yRkC%p5e$ZJ| z9butKW+v$fEtlX@MN6_i!4lXw*Iad-(F|>+TUSPS^XH=19xN2(E;G6 zdLI-&hvZ3x>}I|_xG|>AR}os&d2)8wpiOhBuI@ol4JsE+y4=P5c?yzbIrlkK-B&62 z@=uOCgTAzi^wHLh5}hmS;@;dOh8;rUz~j888NrWn8H9U_x~X}q>n6G%GAFEVUye>F zV{{tjI8G=(ooxe(@wL=mjGzE?peJ^B!;Yhn7;zrrH(6P1Z0x^u%odHc9@gNKdUsS+ zmBzB%H7sl_mv^E$hlwz6xWq~Ou$wMe3G>2iZN=+Kx3|zf3NS6AGUgBqem=b)KY^i-%0PbUxQ0{@s-xNp2%;NWOA<**-De0TxrL z!fxij7#Q*6H}{TV3@PUEDK1A)X&a zxf4+WQ=Mv~A%_X(|G|^8f{pYmj=FtBs zD7LV4EnCIT_eyKqJ{Bj3-gYgwEDz%cv zr<((~K7GvYs_0Hx`N!tu2Q1K(RU>xVF0l*b;df=Fn*9$UGfB_e*`AcU4yICD14J9F zgDbeH$S-Je34-eu3pjo?pxY(>69RA48Z{N`@^SMJ#f!JJm_AUgC?qfS>uP1g!FbwV z?8er4TJd9?^AYvFF;PiqbotYzddSd~k|mQ-$7)S-V6Q|H8-fRsT84;p;@oa6AIB?~ z^GI70mNYP9pkxpcb~W@aLfwPh&J4|Y!jDv!btf4#@WQP`Etvp}fd$1`x*oW^9`(}^dt#!YA;2}9E}_uZ>e7G2sV72Us+H=YvfI_c?jJc4fu5IT!{cMgt8IW@=P_?kWpk=5?jr3DYLfQkcUHx zP|dDlYD)nXf*Hu~N=`PBc`y%YX0O-joY<%t>)6G^$pt1Ks8Pm%GkHivXxiB}tX*r* zVOK~oIk(MvSM{BF$K!5wz%i_O8cZ193?3}r*49uN5hZh|oQxT@{k-pTNfXCS^Jd%e z;k6jqp|{ZDbxUh*w+F~TzU$tG8mON?%14;{G%au2SWZABn^0eRpyzr`r!j?{-DtOz zumm(_D{{nebibRkVq6gtW{gSY>rsU80?VmUhzFz0nb-`cC+)CRcSG*BL5!9z27)Ty z$B)G`oMwqbcy{3VlhsPC^U6haT_QqFB?gVm+?*$_kQ;Mzr!9ATo)P01TE*m2@(vg3 
z6pE7rr;3`JX?Zht6e6^K6w5?fy3W-M>JPn()eg7H0p4D%{H7t?+=RxL0gyZI=3LR) z?Q6f_i^qMS5Mz{k;v~a|qJwOUOtfv#mutV(3Hj4?{kRb{Mg;Lnd%E-fFEqPJ@XZH& zI-964)B&N7RJhcB-!&G(%AnU^n|G6r&H}DcP$Kp>t%8=_t~bA#v@f!AuvApCzmCip z*$b;wcTlzBEgjFUV9S9zFxu{U>d2Qy%Op)SWU*m5%L{~RrxE4Ey;hwTRxZjzt>4ra zuWQpbGZ-oGN;_AoMVr%sJ#f`(XAl)gwzU>keDu*8Obd8jdj(-CYD*3#G;pb|OwN@T z1fl?Bs=3Q0$qEdEKTQ@1^AP`>C#8*^l1vsRjGyL+%&BBUMEeFA8)u$q2pMEaHaE)= z%qK8dJ78KML|AeOvY@SF*i}D$)nVx$U@n}afYX{I0gcQ^yzIRR40^w~NLdq$E5FSLn3Ivllj&VdDcG0z z38qvoMUJIW$<_R6Oz=aYd|Y2n2U9tA=FRe=6r%gZCQj*4T_>YrSfhCk&QUT$oQAs5 zML|zj;PI72nd0jaMntfyOpF{VW<+*^jYVf5m#bX#_IM?`Vqt#ukLF@cqtG2AlP!H~ zEDyf>cZDsjguj^-8=b%Xkhyt}oeO}srs4G#JLh#SB$A0n--g!qqrXyPsn1MvhLf|h zO|U9`zM!!p>&!XCyyz-0p@@+$);BiQRyp|fbM=?gx8lN+5WhRv86xHrXvc$>g~1sv zty~F}!j(Q3ryMch&)Q5qaggn~Se#tdI4u8C&~!emNfGJpDNZL>1_j zwtuK4B7d40TyULHn?E*lK^9pL*R(Lm&dj6A%`^=jy7ghL_86o-KOe9hN;!xd&m7&B zM)p#3Jr9sTE3^}j)-}bYv1!ofjpqSgv9!XW$5x5 zhF8Q*k@4~j;x$#md|J@i*3m;eumC&F>;65p8GvA3-yBJJee10H z@ki=u^Fg0IfgX1$AWmQq2lZX@P-_C^PHu?j!3#c>+&f*=l18$vsVvk*Y%PLtgb8f! 
ze4WIMBQ3nm*BJV^Sr7Vh<<0+dEr9!Xplq^^4u~IB+C*|DF_9wowLH-DbTBpQql@)B z(c%;Hw!aj@I^hiNCmzEr>*)xQp8IW&T`M+f8Z{Xjq693}=`N4Dvg#U9VRAQkpfe0y zUUlUbh*3W}=_fLh?Q%bpI77JvYA-7(tnc{f19)!hM1$AqeLA;9?xUOj7GFui1=T%O zry1=0F|NEL5_kCk<&TFvT9#Q%MaQ1|I3k%KOUW{sO=1`T$S;4*X_B(Yab^;Mho@Wu zKTo!dO9BhNH@o2Vb`u~4u<;h9F180w)+=A$uOX($?m zj{{CV4wK;DhS$CpmUl8yFqK3jvlJH;J>#@|dSr4vadz#2+cA?E(-+l=##m~2zM#)i z>h?L&e3#!^R_zyJyF#4dDoj_Pqhqp&yFcpheQNJ=^Jl&pzJc80^G8z&dl2CeHrS?< z+E&z9hT9SkjPcjosaN9e1S#rpV&8yPTFlg6(n~HXI)_@P@XufLw^3*oqmi-E*hDLL5%CKsW-{r z1rDN;wOJpQcA>Vet1Q5oN;WR2lZ#ztrDTw=88cDmomM-_tMf;`de{VR64t?pyhl>Uv~oQK53&0*oZY18A*Ugy^1XnQ4|iCuT;DE1gT zuqX4DLJ~vhS5GcQswV_Xfzq@wUyKi%Ct1}mF31lb#0Gt7!w4;=WTZg7HWPl z-z}9FCkGjV8QD5TGEgjNxt}@{XSSL*Lw0SqG>K^E%zeudK&zpRTNwXsa+IL4y|t#s zMhA~5{wioy-$-|hK>Yoc;cZg|;SOBw+w>2SYhf#iWwAz)wh^@Qs`A)Ga${>0&in>i5P9`;cd6 z;+AOt$@xZb1+KYSw(rE5hxLN#GbW7CT7b}+qgf}J{_=IAp~0)#-b^%PT-lU*`0PBa z=Pu;w4@*sQAl-1pTXw+;bxG@G_3VSB70VR`+r(mEI@W(dN0cG*sAAnReLxeo;aI^_O+mc$d%hCQ&|_M-?Q&Y0ZQ8aJ0;EH_bH0eTeAV8iQvX z^S!}92JH>I9h4mifk2I zrSk_=$x}`9XYB+-B(ytcX9L+So!zjXkNM>rTefFW3fu4#+Yv=Pvyz=-!2Q`6Xo@h_6>YE1cU8!>5sh%BCI1lvwl zVKbvirhtR+tFT5&iHUsfIdS5$N^P}ul4V+H`E=eFeM;YNI)-$4y71gng zx}>{@h_0)S;9VS@L&UrjtpKlpmYykZBT05{POE7W4^SVV8nJlX@n}v>hJi2ZzURk; zj9EJF$fjhSVKi;^gMNugDrw{pLuYZYzKx<$gqEHlL1l`=vsExbZAO+TqmDK!GL+f5 zraW0jl$)o7No#1=9~kZVmsDw8cxC&ky&Em7_JPp(3>s5dzVv~AVNPE!_cR2tjZ~c; zmxgW8pm37ABO97XwWg=3ZO2gQDW_bjtULzRt{GjM-M znUU}5W;e}_Ua32O<@zNjOv3I5d&u_h{#C5Q#@du&5@@M`j;80{pI}*yIM0P+?_Pg5 zS48CZ_vPx%Pxf>foBbvP-<=W)ubAKB*Jc9k-CuC6A(s8NY&8)+TPTtpZB)=bT= zh%w8RG&M6oOj^n~RneF7OAI{`VxQ>{&4f!z4|lr*WZqHdo*qL0b&V4FDi8`%pB7Q0 z)6ev3(^J^$(?8SGy1b6Q&KwKs#N&DVu)RJhz4c`cx|sW_Rkxg#-6Oy~gnQ~>|EsZ+ zf`%URGA-lk)ZqtuB;99xkb-`1-JAc0_fJs6AgTGgZ!WS#>9HKjMH{+1X}kRm^vhpB z;z0Xc06W^8!e#%jog|68NDE1loUINu0ui6OuZ;}p^$kT~1-Zhay{(Qc`3kn$DsI+_ z8cFLuK6XdIKAeWiH%k$z;rjGf!{8OE5ZG=(p7=6kId%MpfHgx!4Yn$<3B2f(ZFQ9O z*1!De-Fq)tJ%HtVFKf}1G={Sas1GNkV-o#p7yfuVt;ZIF@xOp>(i}RiVjPdkAEpdf 
z1RO6|UGFAP&+YWUN59OnMLz})`$R_i2>fjMsl&x7i+SIZJ-^jHJ8!$_;y#k6@`Yu( z6hvjxeg@?$GZmR#S^Gd9%@j!K<7}YFn(}}!cxY*5LV zbp#x(upH56mY9<~vPZmedNLNn%+X$dxsxeh$@(bMF!l7h1Cix+ca}z;qSeTj6I1{8 z`9&<1an}{_EMraNOw%=h&$6qdiNxZvUr%q8F{78gE<@C|VaaF4>=w#b`(}LwtZ77; zFbs(?)p)AW;KZyEJ8gbNOI8Rx6!r{#aHuAbmR*gGS9+$0BX03SH4v7dZFw*!qeyP`b8svM>QQ=?* zhRbDsUD_B3q(>x867TK8yYnMT#$2WD=1*aQVC_|^yi9D)sk@=-E!pFd!d2@!68_u3i-zdBji7c#K!8fAj5lZ14~o0p zF*mpT)7S)l05%djk*KI;<(jzW8+pNGyCxC9gJWFRus#7x4bLcguGJXB?q^-XEH%Rq z|7E-ZxyX1S&shH!zI+|_!O-w)!Fa{%QN|SA!7*>rSC%G=r-`aEDT=CK+>yBYs?v3G z(nYTaQl+;`m?Hv%Sfg79RjeOqV~dP5_y^G<(>d3JZV;)UDdXBT{7nyWB{Hn}D$62> zBuxwk`fP<)Dk{AJCtHJ?XW$2qkw6>4IuiA$u#M{u*>{;HXhGhvtLDd-P|+wB)e-#X zDUHZ9E(V;yYs)wU_qG0XcsL!9xS)f%jTWy4{I0Eo6OMoxH2gfoflmX?p-JvtUhiE0 zYV7Oha(OR(&c#789SJf#LcL|=WBgy~cabY>s5a>o*+M7E1Fb5S6U{`tZ~yfCk9yg; zXV}uzEe)Zvoa)GVYvR(XoURQ~C;=_d6@3!o_+UjPnQQjdHPeO{?>+JZ{2)cXQ|xcR zUPx6y*U4(tzi8Y;%tV%^Rdexxm{B2pK@t@(2*)>s)Rw(NvpAUI3c0_H?l$R#-^8+I zxYEiZ)-^xmLkf%4;=~rYpfsS9VQBX}U^Q9=QMvBx+v*U(X}j!568Yq){4J!NQHbr7 zNaEsQD*NeoB;NfoZd}VDhE76RyrH-Xn|XUEa3Jz}lD|4u_MJNmr-4l=sk&`jg&|ep zbdRcAg(h7}`-5bh8!Kh2Ka*2~4ULd-b^<*jQzc{06Pz_o)N+$7y@)H{b1y#44VFP( zlAWY}_>W5b)7?7AFzcOW$!4@il%YgBGOq-q{#e3tDZJ_d^rRq_Q`C`lHPYfwI-(+b zKd>j}#Y3Z?BqE)qvJj(5LML6vUfs2D3q5{sT|7rJkjZ?$+&!MN4NU)_=x==QTSDjE zI~&Q-e1+!{BinCU+I{hTNMxYst+hy!3WtzyO7N0nA$lo>B|Bcc0qHVjN_B_LYvJID zL)FC2USo-X{k(<~x{p@fi{n-klFL!%V#O{;8Cq|0=<=s43<63%1#$mOe`t8|j-Wn# z{(xON@Q$t8sGV@cmp{Mmg>HHaDW?-vUh_Q>^NKP0UmPrn*!W>e?7HwUaZ`ajkUN>I ztzmF-?f2?|v!y5PJ4IuL+l+g$jZ)@;|nEdk}$?xPal z<nIA-Rm zeeJ#0xqj!`m1@w7M_zfz;Cu3QHu#w72|@;rLsxj{%59gIvDf zqD2_XuG9WVGu$w~)m-YRQ{0jWbGR^mSx{WB)+?IoW{--mh-tHb2TBM2)H%Zg%Rvoo zlAh-5a@qD_X2jQ}{-(>rEU~_hMKKD?Dr%G6ArP~IZkEm`1UOL{ zkf>1|liA6MckjR4@6EBte;FcO_!#~V^}^C zF0L`{-TjNSHAK_>G|&h<91T8%yJR)-4KlXuMma^gh1 zhOVaJ+tW!K(>jh@SOlibI5j~(A$@84!W-T z-p8qy&!@<;Ix0_;JpaAQga#=-+FkgKS`TId8UxsE1xM+G4y72#|C=zT4nc&?i@brD1E3nN{ 
z#1&%zQnP_HwWpj$hGtNkOID?%J9dy=tK}`sk{b<~hGpAQH7~QqrZe5&B5~W`=3Pz!(16yrPrBpc>gE1huLwdiVCy(M0L1KAz8>Gk*nc zSWO54gfYT&l=o|Q0V&?mM@lPRUSZ(qQKt2gnUIrOhXUY8cC!UT1so1jmL)$ zXFG&xn)<>kL9EG1iN{9LUptSkanL-UB^@x6W1$jO0e)l@QH zveDKzP%dw}dhAYR+RxiA+ILrVRG1x%Nq zZmyckw9HhLpEw0%iUVT|Xx>M0%h$!JK*dw{n_;GfsH^J_z{nEnYv*JaRPLy!=IZGA zX)$xCynDP(Z(jOhJ*`52ZgzcOEsa@IHn zXq|yLT;j4p*yv$fwA^5#t0Oo-1Ku?I&Ik<@<4ujN+VOM zDt`)m6?+6w5njR)l=_VVypS3K*_G$*IJI+5kh!gad?yK#i@$2r$hme%{JA2oLH9BT|AMsOmClr;i@HPAB{C{#eVukxZ+V6BFqe6 zJcO7~jt8iMGQ9C?Y?ZKYjBV@p@6OVBum5#Dc07N~+c_gR2rrK%zHT9Wb1Chr4eB|$ zvCwJ@_0{wpY>0*=_Be%sMD*_$4bo-8mU77cfO)@wj=$kWwlssy1f}a zoJkHisan5&salVky4ou1=&EAGGMyVIR0D%FfI8f!y(<;|V00$KXMt+efQLP(m?F*x zXD{21g*s6L09=br%p!MTnJx^{w8RWRLXB1i3ymUEXqzQb8cf78yaAf%xbk(v5Y!9!<^h4 zcCJ61tEys4$6KhjRdI0d7Vj^4420BvbU?QD(9WU9a7Px~S}TrhWmSmVkBZwhn8~z* zqjakqGXT8i=77$cjakoqlZG+J4R-Hs_hId;eN~df=cY69CovI>t;}y8V8DDqT)Hu9 zIu7;|Qrtr)PUpzM{9CoNT=cE=@QGN2>ecGMH=7Xx zJW*a=(^HJ7h99vheO6;lLEc==CB^mCb-VZ1J+2;2k9^h`8h$pjF;fqjreL}k;#E<;#y&YjSvIRMI3O*7Y0Qpc?~vXQd+t|K&PA8C>!a=VhM zJ$;Rp=g!AMP}l&guLYMk)Fm*|9%7kFH?~x9ER5=B4v}wFaBP=z-WSMwxJc706A8t` z!B_Vua6~LqiAR%;aMIHSXzlF?G#_}q&4N7~r1QA_JuJDTy%u=k@l6?yij1A+OeD%y zkh*>nUuHpH#()j&EGyw@9c@y)Ct6v=4Ok^e$>|Hf}~ubiCYOTo4w!em9b14fZT)=5BD-I7X*8GI0dCB6Djraa{p1~ss4$q z+J#|V2f%GNLQnh4VrKmx79umi(@8slYfF1&m-6==1X`0GxYX8MdLt54+glu*3)0?a zN9aJ|l1E*EP_Ipe?t~wInm_@)VTUKC+Va`A(J!)7@!9!E>3fykaR& zU1r|ZIcv8tsm4WF7pw=aUdjQ;e1U26*=&zv^9O7SR~OgIv*h5#z~zI`%S^LUf!W@2 zsx>iMtguh3fl$wgkfS{%U#w{72>DNT<;}o9eaNyaeo&cG@rueyqeRkN!&BT?nys`+I+JU67ptB76?JoBx#2JZFNYmbtp& z8r9iroBLw5Imw81C2oi`Zn+$7LioejUQR5`UjWay{k&_r-(P7wV3mU^iU~(=Z(agG zt(;MT?Z@+-ZHZIB^E;Z#`^0$dlRwkmVlB5jZ`@_{IR0QVP>$U7ba;^YnJVRKh(d_V zIW*iTFb;Yvvk+9EOe8Qkl>rw?++oG2)`n*yQi3cG?sDh~fU@yzSV($do2(l46aR(1Qi(q?9$S%6`Jl^rgOU~4hi z%xd6KpPl+7ydAnZ3D+DuR+*NdTBN495>gC3-X-q!afL}pHZ#l;_y$K=7LLfkZc*3a z=gCa;i7C1FP61@ZVa7Q*&Zsi4Cwz)lvtSbXw0?5_&1WgrA}w0%FEa$zaWBrjTp?`1 z=O;~{KGA2#tb!LW1Df`Hz5nXSUr`e>^G!SacsEOpNr}?oyqgu-{^FeD25G5q 
z1;T!!Rb!$^d2sT2QhBP4qJ*Xmbx1*Kkc} z?J`dB!C3jbw0LM!NgK4T>@M|P&AX8peF(Ef7?ak;%~(CPhznx@zSdcPPlePj_vz%} zfHG4KGM#HuxbkPYXe&&ldi(ykdpNH!hN$aDhyT`D#|vBCb7{yhF(XZi3L~yNW=tK|&wx?8D>O;-{;F=M zD1&kkoRku69!2QWxNDL0vVf7MEs-EfONiTI&;14laW}+Cv6f+0xSqWvjZd}q)B<%T8F#n~OaCMn##|Gyg9KSa$*b#q@BB{@#YKNn--$d}jSvJU zOia+j&qy=aJ6Dv$ojvZf_>NPm6Nj&@$F-@#;X)&?1_)J9Yf?QctYXJbEj#=y0>gF-^;r zQA`iV>Q#E-9{lm?f9*dstsk;PSRE^rQ-_&Z#=DC!3FRRKkU}5@+}VKDQD8xM?y@V^ zSYIkRMaKBO55o2(YHL7;!Gx^rFU>Z)rFINck+AGhfJ)HCBhC;mp6G5dTnA}|l$0#v zPF|D94Gy08jWL69@S7plI0o4Y&L@SlRs&pQwOn=Sq*1E8aHp-CVh#JvHsLi2t-Tvv z_uJ-L30fD_va+3{ZJ@2C2^YypS?si>O3G0)P;d9gn!nK$EZw%#O7d{|O4iv|%4WT) zH2Urx)1S^0t|F33<)j_VvH)Q@mH4Q~P>uVe-?d{2WOs7BICh4T7D^1u@(eN4*|DcA zbSf+1{aO7@@_K5k_?k2=?)!HmzbBmd(~)aKvhM7)8IIp{nE0rkk!X0%4>k3OnxRpo zR^BgWNjN_@W&Ku1K%F@dDO|A`9~dGqwLVA6fvtnjFS{3m@7`TD@+txs8)IS3B`zkG zz|=2P?~$Xm*WB2$9raa1O5|bAdr$_sM7J|)e>yeycd@w$ko)|?ER zrfh|Mm{|*HN_qmHf$o@v1aZ{a_O53{tp@tKA#oKftsJg)5?b_J&MkS$AQEpNOp)Os zIqypw+6Dai&rDy9c#K=yqX(jR;$%g?x&Piz(j>!6HE)<38EgFVK@8=-DM#Zjk^~iT zI2RebcK-mQi5$?VD`Gh7&P=MGntaQuohoo_IbN5>C#&q~?EU(fyOP|pFMVWtRTtZ{ z!M)J#ovOl{cTA&4R^=ObpK2J?s=_AK6v_u)MwqKGr2LY^J;`QHOy*LVDsOoD1>--L zlw`i~1JB=2;S2^g&GNY|3)R1&&hnH*n7_3;c}We#0guwrB(<_G0gr%H`g__BJmYR=`RXM&B)R^wzq=%SN&m)+e3^QKh0HKYD%(mjj zD@5S#UGHgq2v_YNW3Cjy`d`p`uBD;ytC?X=M;lgKQd?KsRtBDf{LHxxpE>4K{Mu~V zD-B9!<`J4M-|9`mx1Q(a?%}yV+v$lpWcO{a6neFs+`HD+O2B;B1CuZmp2!9EDJd4% z7*`0NCDGz2A24gbd(X6VE{QE#Qa^bZEM>CY`@mC+K#8Sv*c29n(fMnq$Xcc7@8APO z<`%v{6jT4~=dUDQ#ItFAN3owzNJH@OmkD|Pr<8xRO)d5Kq)1^eO&L>U>vzIGd=)Hh zkm7;)A`AS|QFx=@D~g}&r;s^paLeUM`8OA&{QDT!H~#3?Qgrp~w-g zOzgQSG?&gKC(Sa1{j;3wFyL+M&^8n6v6J_)^Hg;%3>Fqt z@QWQ|Aj3}3NH_N@lW4e%fps(v&e`5mq4ZrU&&DeSep6DW z+R&*F@6FF^>Ae7a{j`e89W%m%bAAdMT*A@<5|Uz(8Cbdh?{1SGkqi)9A|P88D?l2} zh}@M;Hag4D*nNTLoqF7#6W*QsP!SQxyu>|vG@R$CZ zjf|4=0pQH&+oRZeTn0+&ruVIIuND@nG;-z$OMdEGH6xwNRad}g>&W#dzuw855xTzu z|AIn_Dx2OU%i9vt7jeAM@Wx(l(rKRWLx@R)9=~Ns-Q0FU)m7@f{!FT&%f$e2 z_nZL|bF{uxHPlPNcknzbmX_j*x`sgF!gx|V+b2INKU*lNt*%3DTiGFl=cN 
z5vmvD8~?@4`JPhZS7mSMfZVEf)+G~RPbp3S*8g-5^Wc1zr(_Hf>VrcxGjk>;gfH^H z#wvA7w21S#IddeR{2ft9sD%-e7lCZ*FlCkFkAO#ViU%`t-?_Qm_1*zxIyyl@(1wf*zMs==94} zwe)~cC34+)uU$GZ#uR>uoJJ`@4?Cm%^CY?^FWYbL1j~z(rO-L>jdgB^Yf!pOm)k-4 zqJ^vzB4B^;azNQ?Kci}HAQp8__s3i6+a;h)$vxb4y*`e~wk$2j&vl9mko5#10!}U{WO#;Qji+zGYYi6xpCkx>*$^BVguX;sY zXWyZxXY&Vah8G&^>?TgHc=`BlJ2Nv)B^u8Id!IWyxX|Qf7smI@Ar-q!8YTmZJ8=3=Rr_R`IvleeZeHS#)&}bF|_?xuKt2 zz4&6s?&sp{& zsX$bX_pF4!X}{giOGyvQnk#Pz=6y6jTOlHOx@{80w0RePczmuFd5j<})0hMM5z5QF81BLCKCaM$lO$_dYUY`b5zgR#S>Y*m*`@}f1;bj#f z3o@Scs{uc%F1SkOARd08GJ{nrbDXpGw^l`=J)-!7--Y{f%J2d{X=GU^Aprxdi^ZVM| z{&EL|*jR5M>g$iwdrFUW)oeCXXVNkAYbWPi30U3sE*5r$NJ-i#)_+|A%!Met_e17~u_H!; zmH`V8%*St^M@IT|xwLdr`8x}(WxU2PYeLZ{T?G@1h8hkAc~Dxc8{6OMqvrg<^&`wY zZRk~i76EoARL}C9@XP#?CL%f7(JYB?MYt!7;J|Tm^oPc7mR0jNasoNO^vh}L;+NQI zW2BdUnN;%1Bnp~O0w1Tzf8yJ-H{7a07wVGVx@^w7syHP(`e{8G1 zqMv&e)2Ma|BE`Igy&`7!DGm-|UdFM$pnl+P0wlw`}1@u%WPqjc1v9h5=Nd)^Uj1_j<|miXd5m zFL{V%%wXLmsRuESSL0EG9BkAY;-vEZl{g0r^7zbxUZLB zo*mJq+dek!x}Gd<@sjR3#eLxxkzf=!*>Wx3<98w3v=aTRX$8y~V%&{oO$eM*co6?W_y>C`~lj??7Jr=-Te@7!R+@~K3E8fy! 
zGf-qsUUO4(na*Ze=JdkfyhpQ#q*mYGc~PLGs7~XCOE|2f>GdSx$G7$LmTToC*> zsbsHufm^eZmz=gmKRw59 zd%VqgSN-F*)Btmj?eDi?TUC704nUqNXHc^PecYR#M!D7uRl7C>t>XVjiAXw1#ys%B zJ@|5|vyME_XGnb$@p?H!#wS;ZR7E`Vp6C9`Q8y#nK7kYcSsSJ=UxZES_}>e8!MLJg z@lgsJ{*?ep@#CA!3st_3q>fk$^Z$m&M;X8T@@GU4XXWN&R!lgs1RWY4EdAH)WGN|* zmAnJ{J@StI5rpi2zxgNw>WKS~gn975kLAI`{}`4B4FWpogphMsxw)y}2w3PJ#6A^!Qo|9`lDZ`1#` zn|YvY9^bzC>OlwHLyGRYe+SkVPzS-TGq{*?T`$L*kFdQ@I+w7)1hq{4gm#5#H!3rS(|%zb0Sn`dqQ@^Ol# zl&J4Ift1(t=|_xADi*B_ij;NE*EzY%tS`Dmy6N;>Kb1y%DkNJbW6RA8JImjRn)3X6 zI+EHyxj(*WLWmWS%%-6xNDHcZ>Y?Ei#x;sd2yUYx?|RLgMjx44d?TtoYV6B%6BcZfB;D z(-*LZc|6AL5;^kuBq~1dGWYIUubQ^H@-vz?q=Pb$qZ-4NHB{>A_a;RNkepoJ9d^&k zez0&@*R+)j(GJSnJ_UVx%D#?UUQRo=AR#OuA|()~aG0}4^6#*ziBI1U(7T;8c~MzS zH%pfkA+IGLZyBZ@w?NZ+pL#A+3v}=Fe8x=C?qPmDYGPzdT(&7sWN9p@jskC^VH{MO zc0X`0g-%L=_MAT}D*aJ#D5r0<3z|B>aIL?IK~0Es)U6|l_Llp}M%YUlbkihcm#2Dh zJ&jOnX;FuXcLTzttH3xbyIeN+fr~ayvcYrPi9dL2AfbA=Oyb*RIcHIJ(M^m~pDPaX zarXYqo-^1!G~hDZbmabhL+7leVDtm?2#kkMWiLy~hCbdFThbH*cR~zpWaP=^<2|a~ z+-j-WvC><6?mO*$L;rS0&9i>t0@0OET~YDec~|4RXTv_;2U*GGt1nd+ktK3gYkPbcEP$U^DU9F>~F1^{aIuHr=FYhD0`v z9|4)pSx_o;YS!IDol4Z~J<$)bo@Ftfe`c33)bgvAPPn$DEgiZi%V=7+eM*^m>Nn(A z)9{r2v}~(tF9A@n*M?9%VTn(dw`FjX>P;)!(nbO_e?yppwx9K9J$$2F$}bYF`26hD z$-%zW-S%VOk=)tqEb_+@|LdfDBdt%=k-zh^{HNdL5^cnxQ7JON$uAu>5qBpQxT>q9 z{n)dZGPA<$w{wp6IX}ZoEq%%SjBaieENAI9PkVZYMO9zlpRaV{GSV4K5R|?aXi1!S zr9eSAOzc-8*0WjYXd&UdJJsaaKtR7u`8oID`vGUvW;vUMp0ek#{PB0;k{)-lt`0#$ z>qPlXd7h|%9yEHP*|TTVx7TII_VX43P1Kkz|;VIk1j+vqVD6 zY!|j-n6mt*%CQ34!Ss~RTDD6m5xTzQ^hgS_h`lKmP)0TWbAgqm>1d>6 z3qMsl;z^D;M2|$wavnn9TdGE4(hr4L?{j|~*F@%?a!_~0a=tK7E0d^o`Nd86<@Lc4 zIrGAl2OvCXAo$hCdAY|Sd1ofIzt)lLr=8Gb^5Lc$XWij&axN39dMF3^U+9!$kvBHUB~~_zovUekBjnWOaGy?#)JS zvzsdCrb@(0t(^iBlbQubW`WB0_|`;g_f4z5xcEEUJtt^1AV;^DD8bb-rTOVq-e4@)@Mqtf$t+eE-?^q1{Ym$oNfYSQBG}j;@lJFPs@$R?aebq?%b^w!aCZ zUT^*5X@sw&DIwmIpmGZXAO4&ijgqnt<|v#-P${$kKXRXsTDeDzwRk0YivtiQWLfu@Bc%bga za`qfl_G!_q%#pbUI#Trfy8hcbg6t&djoA(vsK~F?+0qBrP}%$&!-7 zB{JbU5;PFRAs|%XcGbRel9YG)UdUPQ_DQ>@#&?0$Rqa8c7u9=@1vElMbx0E~f;r%# 
z=|jx?vq3D;-yyq0ej|A_4hHGTyt4YqJ#zzJt!Jy8I-A1sIjLpKoX3waZ$PzfefvDw z>*@^YZ%gt@&8=lFcL5Y#-2uVGn-K0l(?TGiwVu0c#)Y0LW#}y+Ew$ZNIu#z^w_h&R zcVA0TiP%Y}YYFp-+^?6~D#BS>A2ZNLke+0k&exW}gj}&F zR*jZp!n+ z1fOQ0_02qkh7bz z(2YRbS3y4Ot=}%cI5BQ%#gAm(1iJVYvyDJ%gpo!>4{Zt zL8y>&qyP)Gx)~a7>svdHdvRd%6IOBvZ2MGC%BMy0Y`hIn3q!5q`P%Gsy?c>@L+4}1 zSR%rcSVHYgVqx+kgT`-U{4cLE^;|7v8>c}qw?BOg!LS_9o+2huZ5`6MJD%!LTz7e( zwA+S1Y?U(M_dPvE z=X;^&7s*r2h6W_;wpl^TrTf7iu67g%ep{#esx~w_$D;5X1L9Gq!uS~8>e=n6uV%sc zDTB|@b?%rmH7aR$!RJu-ejU0Fyr!wsp?8P}YulTCzCX0ptc;;W@JRX;U^nuc79~SK zFt784;l*nVL`n6dowg@)^Rs6_G=Au&46=gjoAQ|cljCQSJEKfb)DLO1UU%G+^GGXU z-}N?xpwM-fky5CzY(;Eg?(40NCC$al8G%JlxM0El?@l!OAFKlh=(tD?7ArvBAg~Gm z$7ahu`9K?~3C~}3U|YWp;Raz{ID>%pIMS+0_@hMd$lc1t=(~<2f~%XO`f>8AD%SLW zQ}d72u?obLit&08-Q`?1IFV&d7#p0LdkChI9NNvre9@`tQE@(r^UcT>F%#Fa#Szrh z&Ayq~UFx}Xvk-3tx#Uwm_?vb`j3XHN^Iq9bPm#Tu4D{sGJfv{_cc;|-!Fpg%kyflb zX~gxaHrP2Q3PkwSw_dmea(S1m1*h?M(nnqfMwYp`Ia#n|eNO{Quv7AuvA;MfGjj&~ z5Mbm!os2AR+nCIWxkobxEqZY}uYaxkA@{e-e5XC$&NuC58Qe#sPa8CEl6+;h=x9tO z2Y@j-rIW5T_fC7VwJ;#aX0P*4T40b;(d(l~1_tKaHT#(6xvkLify3 zk|ulRB{;c1KkvT>&-?x%Exv)m0BS;+5VT)X9AzEsvs|tTg8D#R7H5(N{w!Rb2%5Sd zt+y67tCB?YH~Ba0j2Ox$dKNJBcF`9w6tB6`$cBV=l;T*&DSRdv3lci~i*M%N$s4bU zG+D=X-gH8v>$JuvLa&Cfz#p2kaoiCuEjRd?6P> z3h3E|@$SDtD*0dYl&TCBO&6RM&BOwhn(~5ycWY($E=!(c=ltB+;U81<$Ki9Nt(o8c zOUc{IuC_d&1l_-WTv}~hplOSu-fj!F3gyqK=emBX5wK@?9oz=So2)#o&oW+aFf;xl z4{UF7Cif4^djt$5({f0+zMh(#3CQ~XZ+O#gT`I7OJc( zIb52`_Y=KYT)ccYjVn*kG@ea@_XO$%4e{U2FdKh<;(dR4$e1O&oHsn|@*|OMey+}2 zyLaXyjrt^^$==%+dfTHm%%0+PyKuYQ&o0{s&Va*UCZx+tNee+{({aiq$!vp6spKxF z+fjv=6WqJp!X+YDO3%jSDf){9#f9nyse2&LcjvFRmSMsrdV9|khLKmKC8fouKyAFM zvqPr+{LtYk70!hQBEV&13MTbWS;zJHa}?CX%w*EoNE>I!4bchnu-pGsi3SRRr95Vu zYq)QQ^t1_4^K&FmF~H!BDO2eIFj4>B**f0gjE1{&f&adS(#Sq;%Om}CYvqi8MX-ZF z^Ug;*bK`6Ntk&oS0SPp`E?2ylqL^ z`AbbfTX*rkkZK&*ach!f%{BBY&caj)JFi9Rpgs6}652f1Tt3iwU99Z(Sl9aSX3}Uw zOpeTX*J*d#2`D?g{i!qI4xTEM8j&65o_q7&Rl{iLzE zW2cWu*f*fg;~=G=ocE~HFFfJY8Efa4@KW3RJ95{}1>r{jbD7q&m31&2;MVk9(5FGF 
ze4Wt^r}E)<_`@b4+r_EKcih+OBYpU9C99Q*ES>0C$z&h(li}Ro+_3^ROGTD#buOQX zyB=^<``ww^b;Fr)qZDs}y1VQbF0&SN=s>JD||AvO(4ATZNkWj`D_F z-Ld-aKYOVq9(;Bbvw=^u&=y%-9|WbtYj^@BJ-fbE>b<+JUZ{7b*79k(UkLTvE#c0q z+;CZ-q@ZH&H+<9R=D2h+X|%eUTEeD_l#%k?Y~hji^;6p4+tY9j4GI9Q8jB18AiJ0F zeZ&0?qO8ooHpTf$#ia6hIAtfxK}I^%Upg-@C*S+=cfO*X<2b$h+if0a7sow&SLx8T zCq8m@=uGUt4)-Iy1^I6thAMSO;iVroybbZ++hGp1RsdQ_0VbB&eE|(z_dR^fc5m|r z4mFwki-}A^`dyzG zFs3UOMTmOlTb#d;?>9Hq_I4Wf>4pIGb+7M}sNCn;S8J!J(90v-mS*7vS}K^w@mXw; zPKNa*$lK9&Tf!C@>T|~r;rdg43BAJ0zuOQ_(EAOvwJ2Rry+1i~B_0<*p5t48t{d1Y zFS0HKu|HEa&mAKFp&|EBhG+^;R?#kD#P{E@eaX@K&19o0AI-DAqBN1%j7(Y^ZeGQB zzW&lPs8C#y-=S`(M}Y3ZH6{pN6?UY!Co^)Xg_RS5w5(!U=tw}3exW4R^~I3mKn-STkcal&&n<36@Z{)( zz8+;LuTiNlRAbuyISeifyjSNKlliq~|Hveio>m zD<~}IGm!VhY-V*XYq`Gqvbzqbp_fLt0%baqL{B5-f;tCONfg}R6{=O6?}-$O%4O|| zCQ8VOX9$_{xD%*zsKD#Q0Jr2yqETe8pYj=Dj(r=>A+QC7JVHM{SsZPjEAM0eloKzt=U`US9*cE{WniegyPr1=Xm#LadFv2PUxA$`9?2!17J%4roZh1H%oiBT1EC@43-K& z%yw$U!iiI{;!210=B2{;&CQrM3LG}fdQsmrdx_hCMWQl?hSdfcMk~_BYrbvi>`zQo zpg9Z+*;~&GGyR#+%hbM=pvaA3?} zfb-PH3{0ZEiQ8~ZX?Y5AXNvv#CuB!ZDpmbMfhx9&jVDEMR>ZMop&dm=Nm=Y4p{~(H zjonkwp}@3D@MRCCtuFefiH-R2)P}8X>L%bGAU}R?uEkZE$q*=^s+|^T`=-aEzcZq# z`oYddi-4d|sq{7Do)k;ATS_EO#lgyG@84eZ{76rA)9)91k@-vFSeA#%uN z*arj6Hm!eOnU6}~K};%lt(O#*s#zO%4T=6;$=Ky)xLCKdK_9#v;FXE1K^SosuX3mh zvYda!^vXVz&%CigTZ-l77WAg{wW>-+e6RCIYP}mD7*5Xhgtj8FjoNiIH!rp)Z46rK z-ny5KkQE^A_Udg1YPwVPR4tYI|_^Z$z_?xW+>EL!} z?hxf}O8A^xT4 zox$8d*M}Jv*60%tp~*w#j{UY2BKEb@Nz%0UAT8ZLfelWm3JI%O5=N&v=&@TItzlCs6Isxbb{ zZZ1(uLiLb@!X5s|rK-Bd>^M67()eK}b0Qp@4_I$@oMNYZ{}-V> zXyYKms0n{!kb^Iq$`$az})3m;hZeP4AJG>`LdoR3@>Up*I^*IEVV0^MQQ znx@kaDf`gOtWGwtHdkdxoXfaLW$o8SG(RTC_V-*RS!0xrf~8L-fCIr3*BZpN(56q+aOkZF0mP2aNu5h6t)pLmX$N z@^+)4=TjgMM+)03s|sOtM3Ig$h<(Gi`>SR~cSr*N84yP!xcn$uygz3xmR^CFQxS|2 zT`oa^rqg02ztw#InCACyz_xTW1%NX*0)zOkTWBZ2)|&xZ$;<`ongtHxAiV=H(R= zJErjb-Nb}5>h^K&1W4-THFbENg}-+&gxaI%H4wH(%K97=>YwWz{EPeeI<0xSX1d~7 z8-BQ6#Z@$z&NUog(U!EYD`*Qf{N@KCn@PeU=l{Arz;jG zWmu~_jxYMfr4;gOaV;AjBN8jVN|jtr-wKupA6wlPJfE79RL1g 
z663Yr&GRY&IY<)AQg8qI_;&UfOGm)~dpy{w{yPVe&$u-soNjG3sUAnn#5UJ47HBNa z){{72;?%@hZq$&jQpAQxX?YD0wee4_Nx09Z(BIM^N6g(or#lB%my0_!B~leh-_^am z4TjdF!g6cUntY z*Z$){0ZlyG9K$r@(=!1$n{@;>glRK?(DM~e7qt@p&ep6bKi}=if3P7N0IC5An zO8IapkMFKaCnR3eAkRHhYwRcWDS z%^vZ&uCbi!{$^D>93TFB4G>NmypEhWZvR5xX|~6Oy^JG*U(hWCg-WLrVss&15nva_ z50?no(Yv0>+5)aRMd2i`IVdZR4 z$cxDVlMUNkPO8t4=nZZCTqfT8e=F5DwE6@cRl}bAImPjjH*LziUsP;$jbq78?rMq~ zA(-fW?c3Otqdx_#x)>{@FZ)TobTTlubGllBq2Ex#AZyAP3l*^FO0K=MLWkyuAfMft zSgsb`-jbiQVXp;2C22NCnDW?lyRrIS8|z5*wu9ouAYN(yD69U>kIua-?vVVf+tGk8 z%GT^9Z`z|UAhL5S(am-o_#raxi8KEXQEwF$N3?Z~qCtWM z4=#;M2<{Nv-QC^Y-AQnFCs^a!I01qOcXtmS{MPx-`Nw^tdFat4yLPQL*AzGk%)WVz zF~NSlTE}l>vR@Ct0kdl2T;q1&ExPkg1NQYZ6Dwu1_OTNBNwu4B95b-s@+^dIOEvIu zD!(31`2? z8+>?mbWd$+*Sa1Qdme)TI&svZovZ#`$M&ZlSK$;Yw)Ur!(Q1Q?tQ?Pp*xD?uPKo6W zcS5%teEhW3(F2C!LNiV-K>Z=Mt(>J_Hzi~Tn$*k=LXaduK|}i}*bMOLH?#r^JY6kK z*X!3;1Mapht3Kj+XLZu!enQO5lO-FY5OlA|TWeV0X*^RwhziQ<)%_oP#!GHAPA`Nd zTP{vvQmSZfarwkzf)0t0OAX6)m6@0UX+fS%^N)Xw-X}DQ4ks$pq$xb4f9?b0c(GA0 zf+8z_Va5V?y=^-r{epq|Wt{~;jz`LkT8onS=rQBNqR9Bo@sNf9+`UV{nZVsu(pqqk zy{6N^O;vr`=?^I?eAz(~(6oWJhKtAmkD@jTOyoz7>;?`s%)=DrFPd0=UUw|(g+-5U z5h@E>`qAQvj3t%*4z=8-ir{m7$N7p!Gq6(ik3lu@Z&9_3vHO$8)9m=BG3EU0B$$&p z;?sVkI*|d)m@1#pw&6vIB)ES*`ZWMU{&kipzC!OVEn0%fOOBe9{?Syu!~y{9qoQOn zO#*2Q7L}PB65>--ikL+Ms((0jWnhw$0zTio7_`Ln)HGGg+Gx;rZ}H^KzNv6gXOH=c zX@(SA#5PbwE0vUOi~MCAYIN_0vlJ(>z_wM-Dh#pS=r0mYV10hhSP?!d{K$@*G10Yc z3E4xvu+3tK5$NX&h&sm(?<3gV}mcGHw)hY!K zwXfX7@hr^l=Xbu?7Z9vVN21ea_581*-?@sNeX`<`cu%&l8yVAWa^2wRs zX=CPbl*Q`Er1vLGl>Hc$yHFMj^%Fols&uAU$^S_r+F55iWa*c0R=?QoZs@?=l|!<+ za-vQCB1fajkTaf69pai_+3jSsXgfvfbH21anI>@=dn?WRKj$TWR`c#$U~d)lzoRyH zla;hpH6>;^$~)Rm8$@9viIqwE{h1O5;AsG=7eVo32S*DaOs~$0nUm6)DiWW|o6NS8N`j{m8b{=8_3b511sJgLOW6&7t}Cr?C|R}( z1QM_et#WyA6B{XzW?0|3H&r(HIokC%$avSa&k~o>Cy|YfH|0=5ter(X>@QN*n}!&$ z_}a_gstvH=$qA5I2Tal??0t`k!w~uabz8I@xVXN%sx%QNc1<>95T6Z&3vY)0<7)9X z!GhGcFKB}Ji+(nu@>KNu;<`NlLL83DKF5b87AcLHIB4Arm31T#ib6!Zz1!nj+*@1+ z_9o2$PfJ5f25PgdK75zl;(A(z@o`{4dRj%Vds^9U;I@}xEEVuxmL{=UwlB{bw}x+l 
zjchgEAR~V=25*9z8Xx*%*u1lP}Pvsjj7 zO-etjV;mifeNZ}_PQBd(vO?@>bxrt0VCiR`i=uE{!wKaNcnR?c=YxTaht|f^~ zKExYy-x#g>6{3e>*ko}1|*ub&L`BmI?j%lYmmIurRT-S$m6Hi zZYgeTGPSemAQ4x4dY8ND>a;(^3`hzv9=^{524eE7g1$FsuKO66gPCSFHiGSitO0(T z3hJpmru9<)Hal-$$_^srn0r?8=jQXhI03Z(BQT&rxWBBfiPO~hRk?$c1Y(xvpctQ+ z%1`s9A;#Jzj(1u z1NY$GMA2dMh%cJN&B~KJvcF2m$=_<5eW0qYEm-Hc2oRCfs3SGZgLa5?Q!dtLnk^SK zlrhYt1?zlQ@8yZT^@Tidz=vY-@Z{-}fC)j0*!G}5M2Ao3lgPHx(NrGal9GCR3VRf# zT$Fv*z^-Zxhl=h7HrB$F?T2S5J_-mBfVBH1gbZVVbu3%|ii~Y&jMU@)y%8AAIa7ItSXu~bb|Y{2VvxWg@&0Md zUbm}FYhqxV$#d}ZnELhN@-Q~EPU<9NxVH97ethUc&dSq5bmn(c$NajDB4_qS2%Ip+ z1_Wkw)sppQXP*HIz5psjVO?NUkyhtdm8Q0J_ajBm<>|tMpm-5CCy(n*pOf~tK1D;m z)s?vdTGp`MQ|IQhisupC!%1R+Hfi)G4Q~gYEsrHb0gs>c_28dmwQY61yB^xEw&qnG z&qiKOE*6UJ`-&T0?JboenBrkd0eP)e#rt1+o>vm%IF0bFb_sKG&`UWmLV8(F#>N8f z7ux-EnFQk_d0e+D0bg8SG?6fgvO^cz`7;_Ih(ON+QgK3qneAfkm|>Y#$y91Xr_Ppl zp^p?fuwjC5EKuI@aMH2uu_FGhjfds-@s2O;9&*rn?9Kyk?21@HAuE`!s$Fx~ zjwFrU#auoBwtM|OgHIkNw9h_4<~l0dtm~+t)^#{58ew_GyXak+;NX&3IB?U#}hqd~b(z z8+^cO`s-IVbSfk)7UNbw9s(Y2zpC~@seLN#dE4hxwHPyzXvG?o?Rt#?0nuNN$(jaHRH|8E z*`gy4Ug|?U_W`J?lUh)WVEkCl)Q?^F6?=npRm{PO&|;{V9U$gye8$C8z5UB3UYuB? 
zp-tUiDRKSI!(;*rn10**NcjaNxxb`2R&D1G1=gwe{22d2E4CN zR|6R{eyt37GCiK#qe+3w$mQ(WsrzGHui=81SN`Ykr#(JwX>vToEm{=bHGzM}`}`du zL+^rgzD;Ugpw6YGpP55f(Wy6&H0USRJ_v~+cDuFdVuVffhhi+;#@B8~ z*_COu{YD(SjXq{2WgnQ1GV$pc0~CS&m= zpY#LY#~7HOV`&~*JvkR_I<{RQBnk@8qJ^sK`binOot~i}WYb25H>JHmztepy3M8oC zH*KMbOOV4al1n;T`niU;hMlVqqtmcLK96$@0tjSq3J?KIS-*hY3*hQ_D4F576eXumjzW;1`GO10gMNRESvmi z*NqHr`u=kzR4HO%T*QjMJZf~xBQ4w7N) zT{OnV3POIx2<$3n{5Q1!K7f;r=8Y?`WbAcUeS-|9jjpLw;#A&(5Pj*gxl$u5BRkd_ z8z}8mq4DI0^|!)OVJ}Bl|H4Jv4$GFqRsdz)n9=HWbQLnzYBb zZr`hu-?w^bn>ui>eSe<~^>y`-E(-GQe<)OkQFg?UhopSws`)D{5~Z&UEaqTZozwuN zLQ;cnL3R#RRSgQ+nSPnNK&vw&$ygBbl^XKV2Q}=cf8LdJvii@1H5<#~fwrPGvksSp zx%hcm#P}qzCK@4Mr8^&tK$x;;whoMcJKVnmzZ-g0kx3=Q2}r1?y-ZZg%&Sz6fT(OV z`3W^_oQ@ncXmV(BkHd%{b&=!pba4FmILD#}j@LSKBxR-so+iwL#LzC|$8;XpB<>ze zufvzpOz|EbnyO9eunl~`wcPMJvR{`kNR0e*tYJIX-(A-)z%O??-BB<7q|wcqk<@gE zFsY9ib;)LAillrkPFDRW3t$UL__ClyJWme!rO0$gMn(hzTl%w@8DM+i78GC=gx`0c zCm{QI`gGa-&&J2O0^B*iUk7h`wTd1gQXgi#B@D`P_M|Dp0K zlis8lJ@D}k?FW^Lb)F;lBsHb?ej+yO#ESVT*P$qTI~Dhq0=U4Bu=^x zIAkdbRhNF(`&<9iM9sas7wt6xcWUC8A>hoZ4{%+p)5*raFu$)mZaP+0+_vk*FaqW# zjeCZWS)E!X?kB^|ce6lOQ*~+OykRC;6N*#=B*`I*!*aY=;Up;y?~00F zHYFk`X@Y#+e~-STn(3+7UIGe`iC(}P86f5X-pYd!k14CwndJ=7_iv@ApEL%2 zbm21u@TWE}%%Sg%+$`R-_jL~c^ZElG!ZRqi^GH{{@SsZGUi>9&=_EIK*bX$fPf`6t zt7P-2H0Y&gnP&iEfKOq{wh!KrAY4y#W3i@{~E+Au22rwBu2HCY7dNvjIxMU}n1 z?dVr2SxjRt66md!jVdM^=yJ`dR!^sviroPXhv}sGJ#KetMuXKCBqDkWhwqP<0XZ_| z@A-zE$%tV=plHrPqcV{)1;@!aQFme``W3qmLkEEAU4ocVYEk_@Ge^o}S};3 z+rGjLr#wOgG`4nT(=yY)#0x!dzsPJk20Z#w=0P=pMjH~@ z)!k^v+V9{NjK3w;dlSkgXIt!7G@5_+E`wPQ;)MSewfvIZQ*~SxM8~>hvNk!M@yN8E zqt!vpG!u~}ph59f)m8MjgD3{UfdtEVbkZ-PnhG6>epTzn$PXW~00$NisBcngHDK^l z;B)My08#!E#PGe(MiN8Z)lI5yJdcpCmy_(f_KM1ZvjD3x(AZ2zcjoEQR zttW6z*YE0ZiyFMD-LCzS1nDDiWe|*&U}6HeJ`M>{%e$_j5L@fwHl|^g5LqTOLCMup znt~j=Dr-412c1fWO8RN@eaAP1>?QF1#ep&JcOkjsd8m6RNYIDrzYEHVTT%X^n5Ptq zjx`Je%Op?!lZc0JT$GH3oM=NB(pMB%I##2F#fgb;v~4*WfAsIoL1TZ)ltf&IC`6RS zNTO8|AR$rnA?w}e(>Nh_X>_UP+Sp30m~2x=FmPGhzi)tDGSc+wm`W(Yh0Z-5)@2Dg 
z%nlQ~s^z?la7j-Z11hwg>Z1aJ?3*Zj+MOjt>u|i)%-cSL$1+wW#tezx#iEN$EbhnY zpqv@c;cS*@gSj&k85I8RWg){g6lAwe3Og>{2eN@)f18(>(-FB$e9IcPTWj$r$?7UK!JdG{y`7hblRX7`Tj z?NQ8zkyZOHyM%QKq_5ejAkbJC=8g51v-wBN6mv7(d>b=OvZn3%0_VP*-U;8$6s>HN zRE|lh>UO8q7?>N^h(8XxhF@g3kTj{qUn`;N`t_~k$+%j7-)}9%*>^Ejz{$`wNO^z7Q(Pm>)Y3u&sWWZv^Dk}ascO8;ufJL2 zRzFM0ZzT!@kIzY4O;(Hc5+v@I$Tw2&C0ZQjAS=e9@!NE@WKZQD{*gftxHQbn*M&<$ zY#BL9cK$G_g5p7fXIwGo0w8%-9)$L3I*(Ni7i-!bB&H^s&viGN3!MEcOY%HkDNvV5 zH8%;ndDZiQC2r>yuJ-?|8<(4*9o{a=7BUPv-VQzdwIKF~G;#~ILF;gIX1wE!$9!B6 z+4%UgfUMY#A1HMh@c7Y#LeD1z81R3k&L5ngdjGFSh#c_N;9uc!O^?~;TJeG|43^GY zeZ5X}U=U}zNh`@;Z+jkT)-|%^8kA)3ETlMg(DB~Dy5k?0Qo>-goXlMf*!v>X=O%Eo zn)kg{yfa^{BTrE>zsmmFn_VG)AnUv`f80b4dh0;%zZFrABXzrJ(7X7KI}bzbM}Lx5 zTEs*xKVz!8rtz73yWNFnL@k4Tz{MCg839XmNT=I;5|^wh%&wL0{73Uh6EF8ydA7kO zZk{@dn001LTYYy>9#AZ7yAJ;EOt$&4-o9fS3IDd?q(jDl34s#dt~g`Y2q7{uzwnMA ze#JvB6`q9-;a{EtlgV{ptfV-_{_10^hN$KpNbGbFU7wrAVvbdX$Vy+;3S z3*nK8c?jx5K72E4Jx?J2{{Mf4IZy(bMV~3>31xMS+MA7r%ddkZc74xl_kV96CvFIh zkWBu#I=Sp=-S{Wv=Qb z-g{j7@2=xFwmc1=HU9vI2o0CZ8EI^!KD#g}bN0?QsH|Dr+k7eT5GSO`iMpko^Vp&= zd6U6xgMZ-j?Z|0~7QVvG$G=Mhput@$bs zZ4^4!8)gT8vqnfRvkd&Vy47=Qc{Y?#Oq_Q=i?MZgA$DEW zuKeV^F|7Jis6&q)jk2irs5VGYelsRUQ<=c2{B+0|%?pAOS=C>L(_3YY_o1{r(Zth%j}Z>iwcd(_H%d z5EeSMCG$o*9-4R|$iW_4P@@EHJd-B_AAe zG4;FO-EpMdemWRoeIpgqneBtMI=ke&QGQG@unv7TpwU@7G>L5aN@MPUU8PaF+H4yTU zV*9?In&7<=hJim6JlUd((9!9z*e{d7lHMOm%p{pr$11c- zcfAra>(e$k4U^o>&QMa((v}QU9+b4%h8jCE0;@0Cv2yNVZcJshTovzmDX_M@y)=} zIE99B8#wmRNK1=KN|B?69&+N-*I!JeO-sjq?95B4G*+Lem=|pcgdXbVchr{t8pM(- z1%q5r=bdVJ!t-Z+gE6W|$SO`2JA4)a0!Z|5#0~UxMx??4tLjhva9<+(9nx=vU5khJ z@rkloC7aD9mI>L&v14sNIDjjxYHNuzbK@m;U!42?Zv#2hm2pt5@!rA;E2~6?Mmlu7 z5Afc%7;^b72X_EH%|bvw1|(t>1iGC}{`pyH6ZEGnD+UkkBKrmbe202Oz%gYXczwl> zQ}xN&A#(lD!@A?bnK$;j6P>0Ey3`3TuV_I4NN^I!t(lc&K#v$E4<#t6={^__?rJ4L z*(4w`)szU;N@lWt?&%OykzW;09a{%8xY@-`3=_s?t<#QPo z`KhOLXpy9K3o~CB(VxdY?1^$^^Ubl9Wy47oX7pXhYBqy4_J9}aF}U5sH7>6H{osY2 zk7KO3=%1EsyMxKe7d7|S!%KfVfY$ApBj9}R)Eak%hU#!0n#VlYtofS2v2(I4adjup>*bijC^t&`F1>{`n7eoFuZJ 
z9S-;rZB)MoRICI9r{4cQEk}%Ax9|7n_8I37uLFTRvx^2#@GmA#ZZI*Q5lnGwf~H6W z{+&#RTFaAWWzBzesH6|PTlxl(e;f!O`_O0mQNg{qq3CS0^GYRgk*~duot;NjC1G*)w76%PYY8-V^WeYF!O$>o7*iieDMdXhIY!EXu({S=g%nk*F5n9Ie zLaBr{gzV_*-`Pw9g)H#cc3MZ$aQqLv;mXLtHByC5gZqX{1&cKfA=Wrq6F6?&aUEivYI1xsjldx6aMtHW zS|rPOw{)C6!w#;$7nXR~n zb{be`p=8d{Aha1zLDoF1|+_NG)mDSK*9wWPAZxm$`ySmiy&XJ=M+y93&zM= zaQl(j%=H*W&l!$UX}eN}!i%k{j0+!y67n^mG8{^(u09S5N=;vJ7z>KrDDSM%!PKMx z__DS|m-<}_GRS$>dt-)*wjiPs8tNP^n_|pbUY171R{K|jB?=UEW(PBE%&Y?&4Sixs zgz@M$E^3YyZ-Q9l0JCfncvp;G{Ocs=zC8(^Sd6}M#*1x7q710ouvv$5Urcyz%_^jM z7oo5K7p`}#swRQCN3Wr^v$-mc_ou~U1m`B2nj#+9fwgM@0qon}T>*ie00 zGvFbg^!5Aw^XJ#lfspkNv{wiNbS<{L1U9}15wryu!X`&lIOac6aR3~=(YBRCKb@bM zDGz1P#9Ftx}c{yBExRU|#D ztYy66CJL6i9eqWJW$0T}Br?!L3h>wk8*kD4e3a$rDg*-qB42Z9@{s=i(NRfFN9&d1 z{x6=exHZ;<-vaxYH zvwgwvfbbMKWGJuL2(-;~qlTSNFhl?FNOLzAH~b3B6_vXc zXYcBlAA!g`r10X??%K3{`PnRjQ?zD|blkniHLw$-VAoNPSF5A7v$tyrEup5f^E7ZS z*F_<~FiM04I~pjzH;BC2Fk^(jkyXs~U4Zi@)5LU|M_8eH4_#jOn(@8@IpvY^$HW8> zuU+P(((lE5r7?`}fA>|BXg}F(z>>cJCPeMHPS`4 zHzG}esYC`XSaGrMqHDNgMX)Qt(!%(79!p@JDjW?mL5ge$5%ThM;$F3L7#sx>!f)ML z0D29kOi}b+zsz5o&Csjk_VQ7%SYN&Wc}=rAesZ|*QF2u)3Gv4+z$uMvY(TXU0PLg6 zU}bL1y!i+P$V$44s(VkbA}L+ppi-zr)pJRvWF}Kq6O(Daon#wUq zpYwIY!X6h(Zt)b|xYW{0Il3xKem0eO89O75vI<)%x`(eCXX|L8gj0kR9aXqldmjVF z<`^Q<*bkfw{vAhw*T*~m?WP}ZC?lPRL@?o{u~~-3@Bft?SJSG4>dTr++RJRnF>-YN zURG3k9ZPv?y28RadHIwz7GBiMN=BAja%g}lF%{Kt@s`c@C$|7-yKZtKZNsIY**eg5 zcUaT$xbj1$E1mNELRmz9x5onYm}%n;5EjEJO+R{*dvSpxzT@ipQE9}F*yzj|67V!M zHo=kFC*|fdZJ2WSdezQ`PF=QgYD2_H*fIYxDD}Xmm9o)ieq-%#vF0SVeapyWX{u96 zedW$(x4Q$-G8w(F)7R~P)Y-dfnb$n+*hY^49TXsLTSThdlR&W!;3ZLgGChEKH4~cX zrb7JLP*PX@v%sd^YNJO*U&GJEP|{z#og!8=c@`KwIp*=RvU6Y|M!-q;cPuEWzYU}- zh-1+Ovamy%Iw+M)QWEIeus1Q`$}eZE(9a2)2Ovm zz%W%YZSPRO>%AgN&Yb4X`+Ga?mV9 z^N?WtA&p~aQ+Xvn{ttjnx{TcpEDK<=-T8B?>!apUr8e_AX!8{CMg-9Q?8>T#IkoRo zlcx~|>MDBLDk&<>zUUE!pax@QW#NhWRBL%Y-;(El-S?%bTWd2e?$%c${<)mmkMqNZ z_#X^B390u6cOwmXI}I@huQDIC-({ThF10kD^G;#{-}d^CevN0L*&XNWpEEh;p3(jF 
zP7~d}YKNEk=573DJB$vCD8wJAjR7YzL6t0~jESQB`{*%=tZr>k7#n_UFxMgHYyL1* zbW+8f_nL=zyGNjtY8Dq~Ra?o=zvYgdYU(OYM9Ap!lWjS1Wm%&@qp;(*2`n2h!abIr z-j;1eO`bj)(fX=}fgU$cYeMAu-M%@Fc=(l?jTyj>)7A);tOWyU*V%{H{1?%W;q}YF z3P$eC%bV2D2QtbmU3fDu33QaT{%9DQ;%lXq=JJ<%(&$F3Aq!iLR+ic0(K{;VqYSfBaWR95xWF(0l}a6*9jhtE(i>vJx*=f$nWDvQ`L1IswZiE)FEU&5U+ ziUN>NxF}jy$tMOrKWZ*s+$zh8!xhw*VPe5Fyg1TP({%7<+tNQt(gePaJW5d@(F(X* zZ33fc_Ym!fjA21qRT1p$^VDJl;)d%t%|+(5vr;J9qJPDjhE_FIae} z{O`Y#Bh<-|3u`a#*GWH5nSB~_SK(q&sK^2nR+c$>j+bPpNCj7}tWPF|_{mAp5^T;9 zoCdM8b3Es7KC;wUZ{tJ0VSk`0gGu~pyd}ktl#uWhgSAylAr9V3thv&W9lrD|0&6q4~FoyZ|TB6wX10GoG3nQbX?x%iNzy=XuJje5o%%LfM8~|N3R-%vOTZxXHI&fq4FcQnq=>Um z1#B1;NHdpT^d3(p{WZ)-`HM)8?Iu{EvO?UhdpF8XuHWiM3 z#C&S={UUUvT^!0tU;XhvgqdkA-$#S1Uwc4Q1{Aq^^TNTo6bP-q*^vvESY8N#gPpR*0SKholeUu|{e_Qx|r^(on0O|ev?mUE!1QiMH z0R8~_rC$_XD%|!tUR?gt;`Hn^Z~yoFOgnoi?zw4|E%!jD$018i2bD5h5x_KG<~#;P zquFp2<_;*DZf3fI0W|O~66Ohj)6*~SBzNh8loX7oQ7B7oB7!+4ep7ud!+kiNVjfL8 zLYH7%ma13*Zj%PcnhFy>yA$H6uel8VjnQw$Yd^L?m+iR9ua#drBa*YK%?2uwrN7Z|c_5TU#u` z64cm%r&vgRTCBM;RqzJ!6Z>+*Cc~1`no+Qh)N13LXbspepzc#L(?_T3dNKH5wUzmUj7b;DjdMr4)I=pUsKtD*=hbXPFDKS(&!Inc-J?Q7-m)C`QXRZEz`*bcv8)An{zPIxOf{J7A z`!FXI{0K6Av;RrQz?hL?_;Rg4N`);`R9Sgx*kPcor?z)9^JA8yr;Wd-u1QcIW0Hw+ zYPy<1cTobvu7Ob)G^R4iMjiHNyGc4E!_dIzT3EAi%JuFI_Fcv(8Fk`Bm>ctBWdFg) z60&hP6+QD;z_bubTI`z|3q}kaOyM|AQ{I3!_MjC-J2oPWv<{NEnmqd8u52(ykdZMJ z;-^tp2xBob#$bKLa(%RQ5JtARs_a*_a}~!xO;u5I7J11V5BOv5tChnlKp86z@U zq^gjO?aY`Qg^qEtt8sL;DY$y?N}9~%rdflY3l{5u(!VEnum`^f?bxj@b8&U`&=y>d>nHs`@8wi z56@mX-g_aDqLi(bqf;aof=e6hU-=YiaWt?5SIxY=eC`1#!vvO6d?C3a8+HIeCmY$1 zWW;!pBQ2ExYmgexi=R$CJ43gi1I?b(b(uUTE0@cc%HkXzuG|8z+Ds=6h@vvO8*PC| zU{2_pk}*1BDag?h=VsKORi#0igRhq6s#6Kk?UFy8e^T1=;K2~7EED}MuI&T#ZWO6+ z|4x|F2ja-obBh!~iinMS9axpi1KZnBNLXShx z6wg=nk0MnvG3L?)S$P~&S4C1t!AaOCVQC@TNiUu9qX@E45a*5-J?(t9G~dUe(8B3u zc#$ZFtzSd@U2^p_3hA$bk8gzGf{rq(DYs@h%gzp7j7l^2g{5w`MH4whH` zFht#7-!{+iJHW@t?&Mc@TuhOObebAX{9+NvpDA*H~ zEZuK0y4IUyb|IhPi>*t;LWm579afPU{{GzaelI0?nmE}4ygG5=w;KsP1$(?u>+!xW 
zb5$+3D7*}<9d(>tuDAl5{>T@16L#AlPA&65kEUBqXx+JOME%B-`@}YHc z(Lf7HW#ISJlxDcCTV3H< z#X=De2~OZ(;yKgDz>k;~;yiZd?R3!eR_+6o+yJLYT;jKT)1`8w7U1L%2P|sPb@}o)e=b}lNs$m-esd(r9(9f-_#|cz)Nm4-cFMUN7 zspJmQ0E?bSHlvI!j9s*p&U}zV@BEo$^(eNnPj{Vjc%kuDNu}~=|Gu_WNkx+qeXWiI zG&xD6e4;quKscQL>a_bk`&k5Y5LmCl^9?X5yx4=MZq&0jWJK^G!md9~d5q=pupG&W zWE;lKtR`%T;)bJna3LT>LR+nYluDTI@{jc%0)W}EIA*E(yKY!Q;jw3=#43SJoLiGd zI_{5&@h@|nGtywdPR)x>u{G<0gtR^DIXrH`vOtxnz( z`JK8cnoCb_zTH5|R_@h4e70i3hsuP9$-IME4o|Kj8f5x$C**w&pFSbT&Ex;vC#9nP zb#_0_qosq$qhntjc*KQ4C+Oi-va!1(bNw=ZSUm)!zi8`_WfyUuwJ0&>$yFK2CT`#9 zBDpWyl}(nN!kKF1bKWF6;CBR@OV^oG==4>}r#9fR6aHQnG^<497$keh)gY@7RVLzG zo$KF0NC#(UO%AodLBmtl%emNDSa|ZC#p5r!hFJG9L-d8E=G}vkQk2R%akH6WO|_G- zwPS{lSG4G;vQL=o&)JNx{`yny3cbS$PR|>t>iAvv8farvUj6#@vV#DSd#@1+Qi_S@ zE|!fK`6bDsXp-=;(g*H`t|%L5-?pnx(~@K5)5v}2tU8R<@f0g6%RZ<4891Ma)XDy` z)-EK#hF~7eGxPDoklnjZbdwZ+CPhlx9*~r2i!N@OuLnVe4<=AEHh`j2hg}K^xh~;D zRM8iz1GxELat%;Eyq_ZEPfjsH4L~)(ar%7LQatd>e*kFvDLA;DzE`8^ix(LBY?jLo zy9b8^ri8ZHC4IUGFEpf3PI0NjU*en}*K7tdVc>Rq3enW%DI?6Qm6pG3i zOVJRb%(jV;1iHw1^oF<+t6~VV)`%?pE_+U46gaUai-4})k*m&diQ5pSYpa8@n`VZ>*|jnr26IQCKl+&KmAfOgIyH>U!+NJx!Jwq z&fj81-e20X^vLjd6pbC9#5iMet@lVCGf5_VdiC5brpIc{@4bTHa(j9qlf97znNde7>L7;J$r zt$vr`YuO`&FBrJXj14anHG7<3ZY}!4)CgrN2VCXnHE`g|-$paWIP7rDJ(K1#tcu&& z9&6i5Feu2NbCyd;;7dkVGsP@dxVUg7q4{8Vxc5fl@p5eI1s0-Md9u<|x$oHj7-ct% zOIwVxnaMQNa4{wnr4gpi;%Rywgk*b)HxcobRNw8R|9^wua74(dB)yxdJIfgkm zwRRIvu;kcgpRov#C7(&+UhW5`0&kWF#aQPxG|%0Zg4Jc<`mHD`PCCY{)6GA#exwe; zzyye7gTy$#`ok$I8mdDxGDa=!+@mVIzPSo@8b_)AC$5l;%7-RbWgWe_`Q?@6g_$`e z8X3&lcdkuyFy4?7w!0l%(N}4VH4jOL{gdVXeuFLtP3vzPDlBeYm(ZUuuoK9nr7-*) z(ua@P;ptA@;3U6PC#0o|C+Z>jskM zd+XiK$*&?Q{tCi~#eGbWbJxL_AV8vnp~RRtm(T9kyV4RiBgM|i&HA=cj`wyvFcp%h zrRpx;y$Fl)0;ll#Gi^;J?dgf#^6G+~p{70ZylM|e-F29`BIM4o$(j5h9q`|+o&Czo z=GoOsDwD*K*lb>k(KjfZpfgJqwJpV%CbeSETDmnbwm5sto+M_4PX+J&XyD00@NsZ= zyy`Q|cRU#Ku!P0^HV*DvCo`B4Rx&tg6M5}u2YlJ!ty$UXF)$foy6C4Uj2j;^UD9aA zSAzPHB>lwVXl#XYcx85$gB2j45FqH}I1;xvqmB6{Bz%XzQ(Pv$Iiiw9AEHhlNbHDd 
z0Kg;_v8%p+YP@N}gp^O!dd*tu(4|t?RN}!&|D0TO=I_Qw=B{OM!%L00^W=cAbet!C zwR!P0nrP&@?xOF<`l}>~+U#FwZ++-u-%9Xx%eBhLdo!teDo^D(QbQP(VPivt)tU>) zHtE+N*$-71dYmmy&wISj&j3MH!0SKqND5eyR=zV88Kg2etQLc*{@15OmU2yKq#{}t z>q0py;j%e=30L{l;r(81oM`e`-211_O#xTE#8M%H_OE^4YmJ93`032CX9Slq>b@pH;|`80pP#0V=Drp=R4Ps7GLJXBU^bA%15 z)REP19&-THAZ04nWCb-2t5X&&V$-B3Ox18Q$k$o6Rnioh%Z7G<`K^pbv!1Qqh?mPy z&sozl!1#fNT4K={7XMjRT$2hWi=k1M`#I~trEZ){^@S32Xmxb*-eNEg1<=RIN{{bw zO1&NM)Rzg78@0P?3N_M79`}R5gjLHuq_Bw4gZSk&_6$)mA=u_NKAed%^*arU44sm^ zI^+5`qaQ8N&#_3~YeqH3Bfpoa$}ir^=~Kyb{VdDIL-h#nS_hyPJtPUT5PwRn5h)bp zD60-T)f0v>L~8$HtRMm21gP1oU%$t5qQZFINB_(b8IfGh^#X@uknU<@?Cfm~UxWLL zBs1>^O>#%DZ{*MqHv)1ocBai;Fg=q@$&9~g@>WHvAH!$DLY~_Vd95rV@{IKN{r{UT z-JF}_A&4Pv&E`rUOGI{@39&)<5zcN1e?vqV0H$B&@ZL=B^LXl^a^hJ$G|gh zwhetvZ|Av>U{_O)Tqz%+t!_5qnnL8*antJgXD+*M`1tuc$YCe+GKy!X*l=bbRfWji zX46Q2hxFhd<%{1I6-wjXSHf0(9WLKITXkzu11ZRpq^iS;!wseqCpjw#GjOynR#tr~ zA+wdv9dAr&DyPj@k^r9E)9KpdX(Z*;j*FWn*y*O9SbCYmdvfjbJI=PAIBYA{^O@r6 zS6|D)>@XUyS*8`iG<&u@Pg|?Kp(`Z|3&R}<4yNy|UbhBb#h1nuMw>K*?->bWanH^xGxp~vcjwT2@aH6mIeOpA}_yx zS{|o?jTbyGU67)8LJV}t4;);behtIst(m(j9OAeSp6sM{wiak;+AQUOz%2379r)J; zlAA=!>bTG3yViixn6G{5E%6X$>+9q&{qE$la_W(L%Cj}`|FQR$U2%2M)?i^t;RV6n zLvWYi4#C~s-Q5YU0fGkz5Fiu|!9BPK2o41VcX#Xa-1m0(U+540sRkvCBWs_%_mnj+ z*FCa{q~+x|L(_#?L!Hqv-rZnR`oC!$vAxlhpKlr++5h}VL}X{7O9!>;dxw~MGBNjC zFkgfqhAxF){p6Ue(Y|qi{2ll1rSHgSto%sqXzWEL&nU{}r;%935&3qbq+*o1ZvkbG zMVmaj!hc}~M*317y|zsqL4~F5KV95RZM~+tvQbGvsz;t(Gc^!wRi#8-|}+nD5$H4 z)J*(;mA0KiTd~RJb2O`L8mnIr#U!K6;w|=vkI*3EQt5McZdB>G(t4GaR8$tJmad0{ zhN8UWXr4TC$l&n@Z9KeZm6SQ)hzCCC;!Kk6ONX)5*Wkz(bFB`!w*iA|8tV13BP21y z;aW2$?7nf+v2>9l;W#ek+ldt1z3B$*b`r-Y=C!}}7$B)LGp;-pzLvKIT4Q>@9wlTT zB>$ubqcVmDzg(Fyq(v5fFP=?eS$o=Gw1$0LoYB+AmH)x;2}LR#Xo@L7d^oj9Jjg?d z0;rlQFf>j+c4t-TEoLIi*=L7*q4|XKPAqauZk(%*X1LJg`QO2X>&{KlK#x zGv=DG`_b`?%rdcE>b^(bvZl7`lE}xbT~}=!d<#8`Q#7NGUVDJOb-nMZlqngL4w2zv z^{vEgu@t~_y{s3jzdnCf=6>=aOWWa)n?8E_{fJ-P74-CHxisn68TKw(k|Nxf%-hlS z{1M3ABKghC-t1Wo{n6~MY$zuNs|eP`32(jN$msQI@m}xQL$nzFiBuAjl~PuLYIn0m 
zU-zx@rvwOx(eqN8qpm}5VeSaZBtnpTx4BJhopkitsLR_P|4p$-_a+Q^yKpjED~);imek_N%y@ju1#@8Wf*O5`FXE@uP^( z{T1>1B$5*wxsn--oa39%8?05Zk-u-KB~!LfbNP+*)@M8rC^b?}CBPk?2R0Wfyhwj7 zn*v?-A9^>~Et*UfgxkDI+iT8L{M{|(HlEzN=|(+qi(_emgElAbu1LNXH+<|aq+h_% z#o=|Zu$k>j>mT#beHA&JcDlICgLxXn#szj0ru4#unh=fK}L+7LA zei=eYC5b94DWJ(+`FYT4f=dg3C9|tU{Kkv_DC1Pf=XUdQ2w<}Ks;+J{aqv%8)n7#d zZyU;xg7XG7RW+KiF}hx7HE;TK(7!?=XkSMF1K)onHP9?@sihRg7N0$BmO~Zn_B^q^ z@OP;mLp=J!%hpmQKz~{nefR~aSpe8CC9J)+B_gQ`Mx~Xo_I@IypX@q?0@|AVfiSJD z5$BiqN17Wy`rhy07J)TRv6nbAA6w;TPGF21qfg7hu-)!SRNq~ueI zhN0dOW@HqdHF!X&w7!BZ7#QL}2E6Frr5{Q_UIBoF_HQ?!(9b#3OfLbT2}{&-{h>Er z1_ImY>tdZ0K{$);ordOBlWwer5uUcyN(TZhkHzC!ru#H?II)vJECLm0W`@Z-!j?JRxjRMvA*4b z7fYH^&D>=kx*WicF%AZtMtLK*t#993;iV&-#f<6B{l(q!H^h*kCjzR9TtVruwbRgE zOMEALcYg%BV-SO2O}kIYB)U8W2BW`kmvm6HmqG_h(pPc*a)3M1{Hj^ zWX{es{+4M78Z-B_nj(*8*XYBd|wMD4p#*mxB|_U)Idd-}T}XuU**XJq_|ZP7jTtg=K{|-FL!QTU+=! zxp|pZI#f_Z7rK>ycZSp7=fn@lICT2nB^@mM^Ry*uNEdLmoSRTlgCS$|aJq6@10`84? z5!Y{elL0VBo!Idx{oCZDHXVKj*DB--g?yo6I-XV-&^0OR3=T*aUT7Waglfd&QIa8HfjAZ@ep|6T#gr2KjI|Dz zVrLti*x%|0n#B?Kbu>stwf$+|@5d;&Vvf_bXv8gbV|q*aQp;%M;~+8bu|IHUDcQHO zchqpPbLRWHQ2r=?_&f}h>Jd*%1zSGkBagsq+hvB;F5ZT8StNLTf*J9jYw39U>2)*d z&9>~;(>M&3_jEcTQi~outYc6B||T=Ywt&j;s+wJ{0rVn13!;M~;llLx;p!Tr$jp$DdsP*XqJ7`X|WeP%|7N z2RQc|U;E2h?Xr0UIZJIH0CBs7xq4Edcrngir02 z$V7=e_GJedNR%G0H2V!A`y+FRrs>cVzgJGED;Lq|6(_N~Zq|1c_yvs%?Rj18;_L12 z<(zcR4v3qM{UMc18eapFa@2?vY0%|=Bbm-HSZ~d-4W{k70$ulN`Lr}ICyrs2I-JUx z&2b^JPlYTTyF+XgH^zl0Rp6I-KZY5qaC&;|rDCN$ETxY5{*+t&*@#PB8^Yy6E( ztH(9-U(>$g;~E?jKV8e@o0s2Br}HFv{F+No+wxrn2;3Nthtr_Tm%`x172Or^*#EjY zWKV$X3V`fdjiUZI5T$pEv->!La)A$DP>}2jCa>?r2$xN+^pQ2$w~MX2d5;6ZnS15J zOluAO0QZ&b*ufo`xcnZ~hAmDCg&}nsY#Y+8J>ulw+?JLwO`*g*QW35JzM-7IwWnh@=?ziv&60N7Rq^- zGc|U-W<5r%zi4sw?oM5lMxmcUaANieL z!1u3p)SmTo>*DMzX_IBE%}Ec3?}DIVNq6*-Vi*~kt<~Fc#~gdyJa97+>)92tONZEp zTbZ07v(Fw#5CIiUa;q`dUpGtXT!Z#$}&6< z>4Ru4?Ei)KF%X6v3Cx@stDU7K7aF%l3TJE2514WCqx1qGAJoG|T`yM}1J;D1!-R&} z;QO0$b9dC&yPib6b;FwM=DkB<93q065W8|E)Sk(}jyWG5PSK&)^9+#>XU(1n&ZTeE 
zKUvCU>tj4!vj$~c?s_ezI{!`)(jxDKVTfEBPbxvLuARSGCv3yIa#V$?&;*GqVI#W^ z{m*)@xZ%e}V5+|9oq7i+P->~S`v02I?}>cF<0uR&2gG#8(|4hYwGZ@L{;VX<<<&?b z1F8FKfkC!YUGUz2gR~hhj&5Yr4QnzYgg>R+c_fbLjt3prsPrl3Tv zw2MHLOU`b%ttx$v(o8G_FvkpUwsLALl4hf-n|ddo=E?1sgANy-Jp!3IOoYk7!|f46 z`-sU%x~P31mt*Arqt%GIWZuR?VH;O!e`*J0|GmSDqBM;)*aZ2826{$tEx&j3&>_HVBk%faaXjP3uygM#TC)IWXgy?}0Xd1!&;3R^#J|Zyx_~iWR8Pf4Bh|x)&E|6>As_|{m&2o&zpJ3|Nqbb_wxLI?$SM%FrHn5 z&_NX81g~oU?=6j}OFoT;SFngMK?i|#-ms1F8j}8ZXYtrEsOIl@$YruMpLyobg5mwB z1veE%f!g?Loy3i_fTK@Vi}ezR8DMU*LX^N)sUj_V!w+? z^DF_*pF)-gtInc}m}WZLalhdE7pQ9Xg`5s~)dpU@OoPHeLNh%oIf>%m&x@=*P=IaOq+B-`EJ@x;Z8tM2MJ-iWa4=Xl)4}> z0SX%>$>8yxjX4&>?7=(t%ba4k_(~po?;jDCs*Nhy3H_aA2g32$G-(W)@i9N>*E|e6 zd34fA4-Rkm9MW1n4qr4nSOUuv$>_g|*g?VsC(XkwrJjl6EOjESbv1iqA3X6MnFj@< zJr*2acOF?6BvpHLG`Z7FUv7>uPZcLb6QA=d3|vVDIb^jo&^MWb6v2{vlSV-k$1YF1 z!2v>9pCl6fOa6)=7SIiUZd-b}UfdPw_;~TqMlR7Cagb9r&kIu3ET3J%MVHBb3Ti@q z_iY&8S$Ukf|D8u~F>rTJ87i{)V=XfSp95KPnDd5}m|08}^=CuJFIAIPgWOJ)5-nxk zms;=QLnb$zXRL-@ztMvU%ufVNKS)8$be&M{UaqcO*MLG@DMc2jDkIwPRK|z*Vi&)=dHiR^)=3q}p@S|*q zAw|6il?5fvbT*M+QvD;NT9?+ke6C@AW4&ICk){i$eL?4aqF~)fvB}J9e_Z$9(m4_$ zf%_W2K7c5H&e3jeA(E~MUEQ?EXGlv^P7-!H|2H`Am3pwv$yKSE>Bw=o5wtnI} zQQ!>I!;Ttb18e-ORQ^0*zkJU$hM>P;qRJoJ$q}rYNZ-kdD3)nDUMjEro9jS(%N^8l zkzDFa3V7YmraCUytLHS=3v}Da&lgKi;ce3Et0bE4sMH1h*4xNbLo5AF50!i>q%`jEwOCMn{>(w&I<=l$-408F!PSJu1(VxpIC^=b;}Q^WsdBCbw%6k3`87}GP0X0 zHka8H*IfG582nPqy)nfVi}c5~{`!}6ddj|{`BA}X8;4S!9YYmDW#B3G&ivAtVP$VG zwB})1hLg5JhMe}H-Dq47Us zhwy7p|8KT!L4TY!H=>jW-kTe8H&*o?EN3Gu9IZs6>n5C&s_Oqvi7r)MKE`?3Rph@Z zCNEDKtp;k_;72MpR9DgMK5M7B_FiF^JrWLtLV`-!H(!~y16ArB1<^K+R(>y)F1*FKPVnO)GtuQ# zk5fI0tcGcDOx1M8aH5$IJe$^YQ^1druRx3BL4s?`ONF*e{Zq9WNhAa|NkIYk54{y1 zBvr4BYtckyoq>T`*nqXZaLw~Ga_qaEpS{WW5mR@a^0)dZ8y!}Z0%nP?33|zjq@_uu z9gYc^j22uwTDX6V;mo^hKt1TkgmG>Gerfe0B4R=AIFQ15H_xD&g03e`gh>x z0+$1LDi|pN?hPn;d*Dti9)?P+CmO8Vk#l9lMigpEak+JF<)?~xsOk3yCJW}-B-XtN zMWOdYuf@6_VEeNATP`gV4vE-UD<9Qwt;cb&I48|_$i`N%knpqnb6ck7?0X7Zi$hbL zA;Fs8TreE5MDB-qf(^grIzQV(GCC*}3tmGr#Zp(5MmO1#kO0rbtVGhU5LpRF%6I}t 
zGX?jOoNCnG`CRlgQ@A(5h>fINb}t9y?Vta47M!3DjmPK_gwcV;1vz+4bsW%U|OC4iU7; zE6l&^E#R-OU)WPp9I=qyE%D!Ud5Us$P@=}prJpWEHHi@Ks8ucn_4bu*5Bwx7&67ko$kpFG`MRnyZHClSQO6%~`|y0WM& zDU;8d4U9?=Ksr^k*je$m#4Fu)KNhKHssu42`67?K{H^8Pt2796^2kpPq7f0qs^2G% zIKYmOO$kamIy7;b_ltch4Qw5ey=8i-CU-w=*Y3B7m#zG)oEjDFRfIFtFe+k?pi4%Ua^SuJ8=V|Lm z05QXgn2@di!JvLQiWm~wd1Au`qKq%D_qN{A8qCz?Gs&E@ai>|oQk0Oy7=L51u>)M_-o98Z+JW<6|36nUs0U!OUMgyu)gk z=4iMILtFpq>RcP5AV0HFAT!HTMCd$N;#TSAoR_0H^AeoO*x|`4HMD!TE#Hbid9vhv zOQHq)P%LM;daaP4c}Qg&VhB)v*C`w8Pl+4WpcYutD@zz&SR#r+d}4_jm#s~Xey{f6 z_Tcdd23kX}&drfHTr_{Axxg9mZ{aGSSZ#6TF5G9e zD$!-=*h2?VSe>4(zGYEIEpX@&!%TRcu*Bb*#_Vz z$%)7=J|00!C?y_&C}N8Ms?>* z?z}fTtt!w?BxMT}BMdj&9}9knT9d6qOJ?at7W6iIt=hdg+Tv5{B?^lc+nd|E%E>pR zPam;YJ2f|v1f0=P$Hw_Wr1I9Uqas~pnbI`%TCpora}Um4!F0K-O00u$Js=L18NPJi zm-Qa=wl~aF)jt_}*$6h5)!m(gZY+ldSJ@hHr1)FB@77gbjk~U2Wc?}%4s4*4$4~d8 zla_JC{~e%|YNH32j|PBVrh3`j{#m4cOTtrbLGY6>63f@g!S5x*xxF0A0RyC{<>et6 z9CULfgJ0co4=eq{X0RubxM{@i#kO_%G)sKS#54dm$iq-!YXksHtocStma*Fhw@Ku% z)#V8EP0T?K;{@0<@IA`Vc~2Wn&$iq(fgl@gWqeA4cfYBmXMasrtv+2{pli+ydHpa7 zhg8kB>FWo1Tnt%jJ9oZpSVK6<$r40oFwlS~xW7|zO{a;IEL=DaitcTO)p>$~e`hf$ z|8kAhDO9rLlD9nX7atz9zOf{{O2t>wlY9|*KHzPsKtwkuRh>PVOV97{Id~e{BhC-H z&*W%zEh%aty~+Q^pEb7gS{?j!oL+S=s+zal^|&_?`rjpDNTx%p4{m;aJBYbli6lc2x3QhNKvT;-#2!0sqx)4=<@ zZVZQX--Go$&ZrwbM7Vbq(AM7r>r}_WBDos?H9Rx-zqkm%JP) zHMj$I*7I!G&R*MJCi36(cNs|%EybhGNECZ_wA4CKI{b*q zwkxSP+kgHfewGkWV6~Cbd`&WbE}#A=~O_!zCN}e**V8DgZ>3y`myvq^+1biX{20ypm4xT9IFk3eDYKP53&gk!iB>- z@rnbjNrW@(;X$Fh9_-4!4St4e2VCP$xI!3F6;N(D7VUCG`23j|$W@!cd#_VJA#EEz z`Dz5ho&o>I$KP-w<~};MkuL+zSMk%^Zt=HQ*LSNd67dMJg?FnbLeE0vhRwfT*j3k= z3xC>WL-z&638G5e;{Fu$z~zsY{e-gM+34A`@hvO{ZO=KH93@I>`T4hDVxr5&iq~L~ zMy=;{=D8I^2?-l{0wc=8P)K+erdU42YU0?{3G(w|Zrpg(kDIKl!A1xVxISoSqA zAB}5VR;pg|0tNh-3eVPzg0&4Z?vISa2~OhT&bO}AD%xnL*I*z0tVwhw<7+90Qh?HG z>J2&>ICkp{iC=Ilw4EdsIvc4)I9IsosIH>n;Llj~v)S!_2{j7cd^zzZ|C#-1F4urf zpB5r+^?m7v{rbS>$Ng-ojE18B<4XU%5B9sGUpj4tSQ)(|HL|JmyFoKZy3J|5TfGb> zu*Zu{e5LRJ7$%F6;x0Hu1i7R#aGP7Ep!G3SlfytxBwk 
z1c529VAjTBrUGw7#X+MhS=mCh{xw}f2j{-^Sxo=<9QxQfNZRU~Qb8O*XJHM^QzXSr z{+svKN*LY^YdMC^9`+SFPa#wz$44$ovqE~`@~P7o zyZI*do?QB=?T@{AOfhXR=VKu`!yQTU=rr*$`A3)3rdDm->?@GQio?i_S1{*twlEPI zeEW?5x^s*qLs>3X*+2GRs+Pj-*MA|08-$Zk^vXP4*RGG{fb^G$XOpYziMiH{f zvPPJ%o5)y)edVuwx|TMyGr3AI%cXFIHO@pg9b($}iR4L^;Ocz)(5qZZ^KX^S)>#$l ztu;Q7n?C=T3yL(wOSSDL*yW2PAqU^RJf-0qcrB;Zo-eal+v6vM-tgmCG6cg=s} z_2qiG%wmbeElFP<%q`dI8w};dhRw_ivDep>nE%xK(Laxy&^Mj+SbrH}PF_!qw&tmm zk+v2T03(oJUsnnCmcP_4@-E%Y_)@T85@tWJf2=1(*`4_`d^yNCyY%YwIs>J-ZIr>p zycTH+p5J#S7_e`J*>h0E zqH%psPYQN1kAOK{xXX((jg<}DQjuN&W3lnusN#a3;>wu0KMlTf<6CcI7xuLefEr}9 zvkacEN{D>)L=?Xp&d4Yja^^)y-7ugyB_q14ub;Ffdbya(&5GErqiJJKf2nG8>1I6p z2BqJDdmAR2^0o6O{lw-jwVe90Hhk5NztjI`dGY<aE{4$4$)9Y^woWIIT z-M#pOPNunO4(xckGk2Tw+gc(gZ7$L?D_&`P=^h`3sru5i5|XVTy$=o|LM#4B^0b z4YBH%>`_yh!l7CcM3tFepE{iKzS|S!=_V#90YkpO6>5UM>^PU&<_&BPSaK>$nf=^7 z&~*D0rv>SLa4&ov+diEb1CqNzqSikM2{AOvC$oZGb(jHP&bRTMTZ}93#&Aoi^A-c1 zs3HRv3F_}vdeuPde(XVx>_W2Ddd{s&AwBo*$U0`u1LFMrREzt)iH$F7r9bV4b}f;_ zxZ%4I%lZDmO3cv3G8xlYS4b*HFE%!M=1kO|H}ILTKr~XxI1#2_ptj00Hp(dP+-P7OnnV z?-E*?$Leq-8LNJcWzKo9t&{SU>Gg`c16)5q658~cn?{5FEuDi>TiI)B!fEq$oAb4V zg^wuLWBaz=Gf--e{r9x>(eg#!t1NFV(B;#ic?(VTj3li-k1%K9Cj+RwAE|~XPx{^H ze5bd={QB|mVt+9KHs6zv8Y>-Qor7RaS;#s&MIRw?wrO% z2Mp5sOZvR}e9T;5+b7a~srn%mfk}X0IBR12brsV{knNB6^~v*5E3p{z!qQ8v;j98V z0h2m&I(Alo{m&13o~8ff0tu>DgfJ+nvJX)2fW0VH&M3~ORtS2xcGEy-GrQgd{?&O! 
zHMT`SfOvY^yb`$p+aCd8AsIF!+&0a4U;>HCs0zMVMCx`IRsFZP68tn=4uH%Vy?LxS zR32_?6F?xD z+aZRiLQ#j$1Y0r|BE`Vg-3evavhT*+f=I_BJKGyrhkt5pVW-dp zMpJ3dAh&I4#gy`g9HObN@D22{1(9s% zLha)%dC=X3iMBr%iJnd(bz?!W9b%jyFR}d7s{W~c3L*tbex5nxW2609cOuMtdeKq9 z4CG_&_j2{(=Byt#vrnHs3K6@+ebct&n+O=8(+{U_s_4R0RKF%qwMo0LPo%p9`;Y$p zcTow{Eds}2tM!4xW&AJeaqyTG-Dfx-fb(wiXR3p{whgJ5ib*+ zsCsphvTh|_&A(sp3F;FwZ@pzchr07huRU$GoAF^Qe;5%Lyzgqi8Ho|<^)9h`H~hB| z179HrI(qDPenu8Hten1e?6U`TIVLJ z5MMlO+4M4gDx8^mczqTi*R1pH=6H8& zjic#%{>MnM_izv`4ecM}ngxP^ew@vQxbt)6TVcY{~xPFBUqt+g&`^!_{Th zJ}|BPu+6m0bZa33`lN_Qi($ke37SxGJiDc~Jo)3ZBqS!W0!BVS z_HPO`J9GwUQiJva)qgI+0@HPKpXFEG)(UV%pp< zziousV@Xc@ZDaEi_$F2@qspx?)ITKBdA`t5?kb9j?Rt`?zco*bq_-vTHHc z>3!)>Va3r@qz-2bs|l8272jJ$)t+`o^-~^-WvQ~hAUAAP&DaV})B-haz^{I3;Xj%9 z>%fqI>S%xd*eoFkEJV&cSKTk{oUr9ce_ARa7jj#eLp`MaVWwEEoR$F&&n z07{Pod~&ppxa?sIBaJ4Oa<0ZOUh6Ql3m0yp>F9{@ayM2MTIE&vQ?!?|8qn?+yCIcnNQ1G! z?h$5-IL+5RP@pY>~`$@{;$Ofi~f%ipmklkTt)+u4%A9K0N0<*9>zD{kMHJS z$-%AQLfzsYFcZU@GLw8}2cP3%5sHXup$2q|US(2*^2RotMJ^50ITR_>>zF@>Kd}8V zAk&ti(WsQ4KUD-Nngjn2LNRGboFi*->9|qi@-v1YxpvvXBy)JDe3rq4&~RVPB9Q6B zQe{jQl;(a_X@aVP_q`hv@#K#5i85t*#D?#HlQK;T09UVKJgRsLe3@r3d4gk2fX(8h z)#?$(fkEv7{_QNI0{$QGaqE{i#laR?4lE)(0=wS)38yK^$y4)YwP>D=Y2^+2srAa( zaHyt5J29^+wt9$wQ<@_Kckm}vR9q^DIcP8ErD#UIUF2#)_|Hy~~u0oG)kO|ug( z{HdrIx>1#=^YIZNa6c9N^v&xNk{AMv;32z&V$OX7BOISLcKb2Y5M;UH`iGOr zx`*cJa+F_gjvj7!<|J2sbM|S{oo`+y{vNIT!`}4R>yP~%AgEfQ;ZR5)n?ArNIp(7M z_Vb@>|I{?ItXf^d&?xxe?FpY^J!8)%j$H30J#m30{nCuh(;#16Mr~qG3X7YQt?lL8 z-*C?EspK{b3S`V~f<(`ZVI4Z$5Wl246g#!y1RiA7{E= z);Z-;^67Xf|KmS9$17z0!Lu}S9AWN=dG;4RI+0n1i4h2J)ibvEQi+zx6tjAPnu&k8 z8faHmNF8elySP^~k@D2J6U1cKd~_x}q75H;BXGaQ55=XKo#Zq;ll

f#Qr?JyD0}MShfv*h|?jDV7J*dlN0T(f=!4Qe-suVm$%mBT$xhf zeZV>3Whc3TUO#O8m4PodGkrVQ>n!4FRT2?v*zW< z(OiD`_xKr``wH4Le~&I%N~d*HbB&ahf6?Z$0_G9~yezp@Uon?O$CL^tTD33c-0|af z_MRWoKb@{k6^%ao^>@A`Qbac0m~j|Z%8TXW>q;4vpY9*Nz-m8ZdabSloBLP=BZneR$5HjSBW=8O)wxrCfxDH$ZrO{^89IAsRUz5I5E0aU( z)u~LMo#3WJyn>uUAl$8i~p+;a+*hOtlqcRzHYG@|$lA2LEU86oF(-Uzd=hR9 zMzb#`yqVT=pF!MQ6Lb;3SWmHBZip%yH4JJy+X|QpD+)8&D+hNjkV7cI&!kgyNrR^K zHStDY2W_=A71vJYt+n4gNar)A6%@i@?DdP&58D}PA?Ce9sxm4^Km>&uWgtLmz9<|R zn%oc}rs)Z3GYA~>)L@69bB&*Rxie9_tpM;HjdaEJi0m86d}4EK$BVVxOrBh`x_j{w z6`~`j$Cp}(4APrLi;vD^>=ezOBR1hK4Y0+jhWb)-mh`*vLF2!No_!)!?=k*+Y`pBT z;h0F=mXFr`7=pm(Sw53BC4>0KITTU{kAeOfq3;pD9yVy+Rz6E00H|HOyOtrgX00@v zYHbODH<`l|p_P33>kK7rVXr8p<9A~Euuyl?I>FQ$psRPz{bL7v(s?dtu z<&MTn-TzbxTP3Pps@PVuk<>5n2Kg)wNUI_i@t=pjuncOS>ICGvVsdbVTn;$ z!b<+jMph)1z^yY&~9C{gD{wpN;j#AR*B zTh4F6q?yNE-xM;+8@^APV<8e2d|4ByC&9dLcgmoqWu(k+(#ybmS2lpArjbAYymUF} zUx=j;4Xar=qx~Z%cWZc+p9TTMky7Js`j_Ddd(UyDz22Pv+{ikrc3Vl{N5DNM^u zR9r<|Vw*myS~$AK{KI}il?LVat4JZHobl|z8L_#2qTBM>@Y8uunA@>sqy0xQ3$TI-{^R0R$ylYkX#YVk@R6tb zO&zoZz3KIM=bjVwti=%gJD>$R8Ih1Pm3|A7uY>0~A=()TU4KJ-l$cj_n$JgY<#P~g zMn;*8t9G5?Y!+S!5kGB zA=fb{TVCy}M`s8GUittXTtGESZro|rr1ucNo^95W!kBKAzNvkx)c0?g*(r--!Xv=x z{iLh9e63p5Srl2&|A@}4OtlTa;~HM6qA{|Zdh#EpXjEvZ-wO`#?Op02C7Y;~%_&$< zxUe-l%XC>;n2q6}xSvZ_vV4Fp5ur44M`pQGMOfHUBwY3Zp6OQxQF?SNoFJw^u zK&p%iYtTId1ryU5DIe%h7j3t_etWz{|;JvHoNaR)#k3WS|V)JnAX1V zeC~2?&l%pv(re-Ry~m)Q_w&<5dXH+7WJcf)_VB%A9W#l8?Zx;c3z~OBo5Nhvi9xjn z^ox`|-AC##qx7$E`{uOF2jo0UBQ9`0mot||shq1*F7Dm)(_3qc%ls=dQ?nc|4>HE| z%C76q(y)=bC2*}jm}IoXE#mcN?lEDMTg$IULxl_7nGtt!#9k~6XzOfNt9ERk6lB)$ z*V3}NO``X1sUxO=&Tw3)?V5tyurwj&@vWO=lnQj#d|$y>UjxS{jzTgA-Z|i9+kTSt z(%&G$jeGC%D?@|NM^q1F+&K8xrTt9_!(-~rRD5Tc+*Vv&p$!!8%K!DboVB15Xe2bs z%=1S<#@Z&1Q5fF7($S|U^R4u9E-F#uqN9kgdm$%DtH&;EcKLR?IEATPXLgCIpzE$D z4)bnkkK1=05t9A>v-Ab8Bzs14JZz`Ycw(pIase`^?@kmps=YH4Aj$^+9c|*{-q-<}q6G^@)b$p8s!|U-f zIhxn6CaEyWd|o0n5~72(`L)Z5d-;50Lfg!d0z!fbmQs%3+eb{oUlSRomfUPb+Kta2 z2mCt!MO<(T$c+T2ZAuKYKsEpwuvu*-<6*`EzPA>VsE&w8gHxL2!(PyAjZ 
z!P0uW=wZ3qU7UTqd%;Dk)$jJDTI4By^Imbc>tU%JYL&>|n;({&t^ZKn6$s$29kc{C zH@^B|p*KMGaqwg{?B2)C$+Z{Rjar`EZOJk6qC)%zlQ8~&R8P)u==#VUfA!?jxzQgD zFv=P2zgpzYi6zxLPYR-xSL>y2wl)vR$_ zpRRYF%kpe2yq#~KR;DNNl;l(gOLjw{gxaRutpdhSlQ2iTQNLYj@OLR31ztfZ-P!=6 z6BkG(9k?!q?`^bT6v3N!b;tCJxC*9$eqKR-c6fS{)vG)UGroSWRUOYl-X+$~Yb{Ak zh4(Z(KV&R2$BEL|o`?`Hl@g;!P6g|VO={K3=V;R{H8|6QM&GOjv1XQer)RuNMIG&3 z%_W^0T5${bUc{X@tjNqntOOV7i9{)-j=CK?=macFuS*0&zGyx~lZa!IIbeZ9H6b9oZ{C?!a&h!v zLF2O7S`C@PXieNI$ym3;qN3~D5U)6?-ewgB5vsjS)1D6xOje7BoA2J=U>qOqKb-w* zm;jhcFIY3L?gajZ*JkXi3x!UZ-1eumS#2_d$~m697>5QQ@yMRcco-=;Dem|~Taz+@&o%&rW02?(lP`Gr zU~oBw5}h2J`F{f*@8_`h#-ITLJusXUu2hhIaV4u(Z?jI-n+~Hue(^&minrZP^;Inq z#QHh~8fBv>qZGQc77aIlZh1xEKwAr+8oc%o#^urK7LLTa9wQL^y*(r+NtOf}n%m2; z#(KK~YB}0SMl%?EM3qyn38js!98Oua>yLD;FdG0- zTdd$k>xN%6_?4(wNKOg9CL+Q^OKO!55Zaleq1iBPQC;_jX*SB(>7mV7$cmAoY=N4( zrebgQsMBd;WBdvcEu^uj>2ZYxee-RTALq|W)23X0AJHkU053i zIB^F=1>y5DIMDjblUkLIlf4rW*?~RAomIM7+&LnEoLC>-s}Vx}OmFcf(JNSsqKV0? zhk(AvP1(~PNMMN%xs$L7?NGo&cYZB13BGc<>PpUl#AP8Ah&t3+X6JF28DYY=`+;o$ z`;7In42BV*HQX+hH+ z8P<91jSlA0B+SiTon4+KM(1rY1mge&{Ay%ZajZJWK2JO?3XbGpq(ZrkJs>dSx|U0&`r^U!+C(*6YCmjk0-avfS>gSs8|v zdD%h@Few~Ob|+P$aK1D1$m(egE?QVKxn?R)h z$G0DspxpZdcUwI8$8VS6AqpCb+|_EFU4iA*!GaKm`GXcSKs?C%7*K2wx^drNiweh6 z#G_@62#09@#q>)uqP*&?55x1$``zW<9zc<~N>!4u+zjg7`c$meu;b4E>}1W5CH;H8 zj%aUGgF`z6ta*ZAmZ;UuUx$ZJcHFvOS#Zj){Zi*Qe=UFNpSE*zaPOysHg22*Qs2R` zgO`}XL$aC1l_on3Gun#Afezz*9zGOfhy|$g(kuq8Z*<-`Fh2AB&(;a7i{+J2bmCTN z#n1}+EnW-`(K{jWZ&I)X3m*2Gw6J9z3QH`F1J$_eZNgZMBl`gDkmwr?`k!4ZR#f{OfrCh zupz1t-uC#xM}=I}%XA8^13R&%+9R_kZ{vD{hklpszb>1yVbu`KSighNhq&VgkqoG* z9OfExQBs?Dh`)X@@A#%z<|dsuHjGvABav!8idvKYV6gMO9~ye`Mf`@f8orj8jJ zH4T;6Iw4x7t52hDIfWHz_c6GGwe#9`>TTLi7wuJ!jMZ~>6>pVRW*2sA^?Bl2*u>u= zk}#xDIhu;knglG&=~nqe(AuTERwBUAT4s)t)g@9cbm;aC!RXxi zl5~UI1P}+Qj&390fF|sXhv#ArmSILzgq+bKTBOgi{_B#_PI$7a-rCkGPOi={8~~E; z^P%U71M@ibz$ai7nmzVs`7r2=6O6%Mq_tmgHR#8$e_&pt%&+08Mtm)!C`PgvsxS|| zB+G#vcV}&|KG;HO)|RJi!T-yT?yWW0AKTXFE%TC)59bggxEYe;Ee%D?`*@c4H6-b{ 
zWn^c5sJjwpfC)suP4Q9jgU+{j`eQ;hDp=Gnd}J~%7odvH@P4LB@pd%+pC#xjl0AdV zbjDH85HhmyuWSsvIeiVL*0P$hVZ){^Bs58BwZ4ECRUOoF1>IR^rHlK{P$Ot~vWqWC zi#Eb#B3wX49a~#zx|48)gydgHy_%PqLT{5o)M&=lf8ZBv_3b3RR&uSYEP=(5m63 z$deTW>5XIpcW*LE(d#7BoCeHCRGIOKeSqTWqK z&@<0SzcE&JCS7{V0JEodS+&3|3_!qC zm64joD=Yfs0|kp)V*%IAA)vCKkGG|0lDp7x+0BWqlVdxV2p_!vhHAW@!J zEAR(+=lEO-j>QPo_?tX47Pf}dJm+B z$M^F%_n20$yo4(IuYZ|tHfhLoleblUSq=_{Q>I^pQ<{jq8~DKZ0ii$Js6FcTv&HJAt-_?J}dgq2TxP-ut&Tb#IS39C|q7U(kX-h*lEDP z_^^XjF~=(NnR4$}bou$u_28So-GUE~ZAUGxC9Zb02jd0)a0S3tRh_0FCUH7(*Y+pv zlS%gDlT*PqU|Fdx*Vs9{e5nlya^#^`&!kfLoftQnKu$l3@nN|r+|^1n=~ZEEs$z+2 zc!D!i8q1zypuQqth#lV7#J*XG^f~w!a((G%q-VG+*isix2=Ki}3Bz%CdRvFc8_*Rl z8E)F9o@*R)3hzbIkfE~C$_LkR=|#v{)|V@uj0p3`nP<8;*XAgCO7{gmcM`z^dinZZ z&M)v9h-!uxq|LD;q0n51NR$TxEeJ1fwXN6p255LFgkGqeJ-t1N9cgKkw+4 zp^PZKr``Q0e4tJGN_ME2nD5@@5uX<3>Dn)U=h_tsozJIcg{aC%aqNf7D{qwDqt^BXdMYfoI38;*C6bymy8&oZO}R z;BIBfVVU*b1XshqW3qIok0|1KGzcHI`Ir)*nYO7@z;Zc9cYK z;S!E4y0+uyt-MLuo?E`|2XXyq8t+Z3pZDqSz-wXCotfc9La2#vPJd|H!EvBK1jV7f z@ALG!Vcj1OK|Uo&q*6PkLG2cj7=#!C^4$Jg{WI_(G^2XGEt@Uf^V*{+0&(m>QK6@s zfkpfW^Jt`rk)0D(r!V8H8*_;59ZyVn-?Re}aLLDfU$nz_r0 z!|TB>7P=q8#A*AP(FoOaU!a6)syq&J&Z9JzTuYqoa~br^5#fK5Yv;y9cu^V*rc40nay!I%BEuc4xX)Y}_(kUw*R zu|c3VP4P^Zhi|5BHQPwmvObC7#hx{3f-ny3$t#TV?(z5nGtVp$O8FS8W1@Y%Zb?lI zx7LBTu<*f>*gYUIzCx*Jcfip=sP7cz(#!UiKY1UVP`-Y^fQvgqiQ-u`R5aB9%yV#S zwx0K;A3utO)_>HH_58st<6|bdpwxg4t>NsD@;Ghy9 z#^T3WCP%Qd=B}DAk@>s-)huTB;-9G`83`1NYb%{C-3C1DmF93m-Q(1*n6nfO9Bpx9PnV^e zk;P{YH)qb+m|frzZPEN-&PtX(V}5YE&)#{nTDO05D%j;aJ^Ut&VLx58+vNzzk>^Uo zqMK0!FLB!$bTvy^wdHXP*rseEhQNc)6HzS|g^g3u~@dWKFu?#}*5`?oWbGSM+w= zQ2u`OpxCbJ1wZNYsT2Q-&+4hwH%)kk8Kxj6>~go;_mD4E8yWl+F8{H;Ib1sreCQ;i zn8y_4d>6Lgx{|`BrT2WPW6UWci5muYXHNb;l+{*hww9jY`I1C-lb7}LJ{i!7syk1j z(k@2#)1OCmVRYEUzJ!lIKX2yb^UdrfFp-=eF4VGL_^O3py_WsW*l=DubR1oRRcDlF z(0Z>gz@$_+$1u*JWrFGz+1RiC)m|YYc8VndXY;AEOsNOmov^FG7jWS4$iUoHF9f^Z>7LR*C6XuF`NXlUnfOK&gR=V~ zkjX#R?sJgondweVSS>3V)*T6L)vhaZmBK#piv4`hii16~D@}?~iU;=u6x8$z5i48k 
zBkSO0bnyfYueWg6SNYEBBh4duELaC?pE%l1rhS$1hAY?%=QXUO6f1Lhuc9f7J;~V+ zUJmyr7;ZV{=ts~F;gqhe7d8#n+#~{IQ}enQ zi9}SucXM(TAn&RO?O4#xsxsA>&5AFoWMi z%=IRPBdsC4H^sg<;>Yj#)jzfRM0n-)?AUyyW>KYWISH~UCfdJYUh3&q8p#$owhmY^ z!hA#unqZHp=B~j(Rs_?Tw^%5`?A~VuctxjJjI=eQF29|0x*!yI8S+vlEAfKp`k_laNU(K1^zj%?!*0 zFFk~Leaz+nHAf&PWXIFnHhEZl!%@Q*?0qMvy|Z-*)NR{}U| zE^BJ%HR@xz-@Clg%BG;_&oNW`P@DL>oCNhM%BjDd)FbjH2C)@3Zgs^j;rujNd$lWq zGzF4vA6xKEFfAf*nz>=QpcOx1J^c?o)is!^_}VyL-VCJg_f_9iB3ENUlYr8a8C zM0!B&{PbhdM`reMwAP}c?`9e@ILN75R*d!tl4x|n#|o#pC!XNLN6}NwjqHc{fVG1# z!2ZJsWB$VF9N1v2{NZrb?J%c|&z@aT<~}NBtWm`_UkGdgufCYKbpzfmD`oRJuD-GG z57F*vC!x?F8qeb?Zz_SaK|Ax4b4Pq{e?4DXJ5iD9Hw2`(DceEsQ3)yC{`jt+Wr@8! zo!B`(K0H61h(Iiu5i8f~9d>7rygo2^Ql4gKXNv_MLM~u;kPO3L^S@h7Cy#ID(QBUB{E(Le}hf1f}BNF{P*e4C9&m>MFW;^rM`H#=b!Oy|T z!g!e|fN*!E+%H6_>1NKW7vxZ)whplJ~gE=PBqxWzHzIzq_C8+(->WXdo;`~F=xRqy^>rbq*J@PXqVv!vGNlY9?eah-(aoO!R7aV zR?oCzl$!eZGN}vK;ekO288#-tXAa2g*CL}tdrUf&;t}ILZKDbnUQDkuzMI|pT0}5f zF>XdGiMH)KH(6n#1>a(XP-xQ}>DYY9uQTCW72}EppZ&zyZ{1tS@|XK(r@m+G*sJke zy?y9@bQoHjYM&YSrWW)9969xE1_l4@rFI)Hz%>9%26b{`nNe8%_N_{7ng8@F)5&+I zBa@>0aTYUC_`cQFele5DvqYzbP%^B0v-0a0LDj@N<1x$uCls~c?VmbW*bGcsgNwU0 z-b)m!o_Atk;QeHOA~XXP$vBnoMt#7UsZ+!3n6*qu3 zCY-n5oYhak##y+mjl9@1*}e6(=RvFL_dhz7oIWmF(j$pFXt&au#44=EjQafuDU`=E?_=jIsAY&`?;}yc3{E0TM)9Q89h4 zhI_b`t34n;{Uz?TK}<4Am{WXCYj-VnF(>{OVmNHMjW?xfa(OR3U5D~H8C^*UTf}8U zT*XO?YSOv4r64$>gua(t^N979Wrzxg3_-GEyY%8@0o(RdZV@-u+pOM`3i;d6jkVeu z>zTa@sjAw^=J4WA$ClY>>yuNGqAGcU)*j^kXA*Tyx1#Ncl4obOPwKC;Bgk;7Mg8vD zDw#N5av+yA|eE}qRcd}3mE$D}o1 z(?nGmKd0aKPFua{4--T-{O0efbFt|U>OTQqluE}Vv%L|<&330 zJB^d$G^?d;?C{-rjAztW9kV@e{{D&xVQg^Tcc{%RQS`2-^ERu_y04TmPd`+MXs5Gx zPRT5ou|b?Z4P1&6G}B}#g&%p^C^rOVTbbUa5MvRnsA~^>;xdZcaphw=4ER_k$MvyE zTR*ywR^xzON7u8>(Di82Ot5>R^hX3W<2RNn!X#Ak80+947R#x@1%6II9SLk;cQ!57 za^T+Y5l;>S(dh+~J<|#=_c#8wV8HnK=E_l^m0_W8hO!gpUGUk_m#S`T* z8Zg;g4O&(-X_6K*zH$%q@iS(_fA>TvcXvU!vS$L6;2VW7de)$d&g^=55u>hA4Lre3j&r2V8z8d^x zmc5;3&g1%O_KHQQTcYxZ(QmhukuNH{^2^R2*KljJwqk|C%vX848fnkfj2bwN`Mxsu 
zX$gLOC#}l=e3()=m-YR(^CpUXSoiaMdZJXT)A%=0=$s7s%=&!$ZDm5i;=0%E6U>jY z=n!kpqVR_G&h&=kMgE%ix7#JFci^Aw@z!-rc>{H~%)}eifsChVoyr2+CVFgY9Dh;K zy{|;wSKpr~4ZB?Xnw3=oo-0WrPgI5 z6hZ%78F7iv{XLaIQ+sCp6C<(1KVO_ z)lt`ww&6n}#K*^{fzqzm5rB)GBL;+fF^4sgTaotd_Lb8c@;0EK2l|%xE7vc73p$!; zBR|lRl8^3RTEsjUVXClAC`D4#syMye@R0F7`5*LQbC~G4ifVhU_c3@Fws;>QT<(*1 zY^=-dVV$jGzrhl1ww<_c*?@rhKC{jS!0pQ`Geag(Bvv}cRz)`r4XMyySq*c0kr5FQ zDnb}~QSey7AdBn)1qIUX^ZJyS&1b<81-DGac0u3c_WJhril9_HBvbTO0&C0=xOLx9 z6TYv_#)em_Xn%Babb5GF*)l z&O-WNrRYj2@UVo`byDNBrzd^#1Sd-(K>iin{27HY^$@6T-Dx-@?Zi=k^8vi3cmCn5 z$}9qg+4KEHMrZtqlEBZZapo69tJ&vlx{`%A3S25EcX7Y=#d;y6L64WU$MA8}V=m97 zdByMt5H^Pe#^88`6P!bpz=Sfq#nluQMvWFytAI=N_x0UZ5`N5}_cEasp@VS>!-QtQ zo*&xZDYn|A(e^qP#z|05L?XGe5^ywE^?e_t5)*6>6%cvmKJONu6R}|_exqcjn#u~_ z$A2dVb1vp4?TU_;{U-{6;Hw3(Xs|HEeU)ft_C@@NF~x{qI5zY!o_T$OmvD3lo#jcf zcwf@&MAd$|HI#FE{lIwY_v&Y^OoHd@V8zOh@Mk2kND+m|8Ye!Irw@8hkfio@5U<~l z-1GX%$3}@m?={A58;5n#9`O49E>T=s>k8XF%DS z1s>JOj?CHFDG8Nxybl5uN150K3HNCC?l;~v0Smno4%?u^n;8H812ls7T0!7)mWpFq zOitwYVK~yyZY*0p)%Hhu39ZkJF3FAnp(lr2-sRN)~R zQyaYhbX;3*LAC=Rez3jGGS$WIc)*4~3`1Z(O}DTtbNF-hh=*nLj!Tjl3s zY?~HY7$~nEGc3Q>TIqtdwD_?s-l+0XXXh^!H(Uq!&trgFZsbo%h;e`oE)XTWN;y&& z?*!Fr6`U{RU!~(x>a5uM8(fxj0g$1gi2B|Bko{thcQYSJ-KheBpw&2g(9QW(O91lD zMjj^+4_ z10imwx(i08z$Tp*uj}65yCI#wJ@m$15S_xp-C#2g1V=xi;sjUr6wTSv7U&R~hj zE;Tq+0vlfWx6Mc^R=bzc_K4c}AXbyd&fAn*z80I>-G7j+TT;Bee29-U{s@+Nt6p+W zJRf*+ap+Xph=<{W^f1w71X0K}&i1-4>l{mycE$QFP#eKgqdh__TXM8Jns0S{&)?4G z-Kq+`^M67T(y!$g6pwdN_28yvS+{8-IMS5M*r&-YT(bzHiYk(c^;o_LM*_46I-u(; z{0?Y1V)w;9C;2IhjcB@eSEnqfYe6`=!tmcB9iyi*_fS$tUXj&Qn;BujzMsl82`NFa z?L1?3JT1pn4GYie>sWoX1$IfPwFOsot1+#+jT4lWQPMTl>mn|ql!PqQ6~Lr`zgFS3 z%KlVkT@lUSg;cadJ8KxHyRNL1h zi3b)}q1R0;{+WY0umL25u&D_pU1ZLl;-&=L1NLZh?T-TuG%l)Q`m|np5^8$&*p0S2 z?BWHg0!?KU27r9KN7dVYTIB2|C4Sy^v+OD|w$k?vI*IeYZDyAPj;|6^%G78F&sYu= zc#ZYt@O#dZz)M2t5gh;Cd@ZAf0A{kBeIlhP##T#}Uh!jWQi60ltp@8FT0-lVLQC^y z(#`g?o(i7mfbUYWk&~H}9KQ|>tC)A5d7*(mvf_A;0{r)(7O5HeFQV?roM{>gPX0_z zEH{EF?bc2hNKsz$Zqz-&D0ZoYZ)*082>astvor>f8LyHu7I 
zHHddnAdRFh=oQky}#3{cc*#su+bS63L z(O5Xh&bH>V$wtzMq~utO$-+ngENJ=V?meyHjIqp3s{h$tule#y0B}NY*sPk8MpzMs zY=24tO&9zj{_xI!a*>vwGud1O`nhJZ$y%X4`prwXA}sNXC+CdjydVmo8A15(9ABlL znhnIe6mc&crjwY8`}|QZPeR zn;WTi){O2YFqO~~o5k(Uyd(7^7q(<>qKu{gy_6R-+zN!sKL`8BfjD?MB zynVGF_w@CG>UI=~yyh{w^36+smOWB0HPx`W^S6uH)~9R)N9TLPG9j)MGGo@wWQ?@@ ze*IZ$uyKp;wSZkavz#Qe^r>pV+nF?FCkwvk%}f>Y(W7_0GM)G8_}#JCQ5B!EL*%u1 z0Dd$v8>NqLM1?yrO$D}YtJ*xS5V`cZdj`~m$-J*;-DN)!Zw01K%@1)m!zT;k+WtE?nqReM zJSrpnyW8{L9)#o7dGXQ>Qt0rBzlfxEV0g^oNlEH5x5vQf18)CaS48R!PtbYB7{qeX zd{i;G{NpGL_|G6v$H`hDxBhv|240r#cfvczWp`VozL?=pA+#o)Es%5CBwduTbSKu8 z4XOdnok4)uugWtgpc+S49|AL^nP0MuOfTDp&*OfMHr9d12E3!IcR6IOaT-#A1N0n!XlYo7Itqc(S=f3%3SX(z=;qfQXOrXFAGcK{_mfr67#2%CSW!aRqP-CEv+w+c|knusDneFHgiImpSo8iGetJQ(p1H?#eOadr1F_Ad)^K1NdAzY2VF=yDv zoPlUFc1;@dVcF}##kcSYth1g7e&Uc*X&1h&#H!4y$Ae|KSMFrB)?5BcNbLY5nT&TG z{|5&B12vSakQdKUxV`xCAO(~Wf8JvEs6ih@WM%}17Gbg`2}M-=6kP#Hvl~&{aBtkM zvtK7q&il8;%L6EyfkKYurv~Bpbbc@xAe@f`GQ8^84`={+_p0c*8Ik&r9-P(=+26!b zA>@>-xs1Nd9B}B*k{FWbib_FFKb@DwJ*kpo#tqg}*gmVdCR}+ecZ$ZqJ;R2>ftS8E zowm0}l}!}_lF(GMIE^kkHb1sHH^w+?_hXN`eS71>uM9z|@(ya+RTUK(g6P8V z8X+|^Z!a??9{es2U~0l9uw^rtk2gY>0_?I1MtQVVuv4Qg>b`;KRZNzH<>taagxC>e zk=MfiWV>rp{P^icP;J07`&+l&E@$n(L2qxKOnCfiv{E<2b49F&81u#u&K(nIx<;pUnr7U)+mHO&u*CIF)4O>dTzbye|GYO`aW|otk|CZFk zJ>xnJx%n)p3{mc-IzPB&Iv0P==>_8CX^x=6~HlRZSqrkqN#vlKbSAxa zolt@IyX}^WwnC{fAafw?mUrdTp{3s1-?TCAYZp+Fc+i}EFt+ltf90SG;+LQ5^n6nl z9?&0N|G}N!2I+8aS)?!%zMJkiOnD=`Y?ch%fTb+ppnDz^O&f}WW>SOUBWK%e`)U!f zmZJwj7TYq1E;@8+eE+L5TZc;SWaPW-#U?MZ42dAeUZpTN_LA7;pz{miR3R=@FEcZe z%8jH^`g55d`_*}IhN}ZH8;qb2ZWtb^Jrr^J$(2EpFEMCU27D;>+SXu@snK3(Lf zXU3wK&n7O7ytK-Cy?!FHUQM%Q@LmzHjkE~}kYnCk)&T8v8x@b;<>rL0V+c$ezm}bE zVw!oum2kIxHm3O=LH+NJVZhrf&MndtkL6_u`U2kMjtXDm#o<;VY6I?dR{st)*%R?c zMqCD$plXjCV#7MrdKK=$Nc4@uN8w_~$G(!KTQ5DD^@5A0M|1tX!)uPZ+zGc3Y z2)FsSj`y`ZbiH!T;aEwK?BIH|=H^hPGPF%`)a1`Ta%VIzG4CvrAEFzgx)69soZrAZ z-W|}BY)BcK(sYF@*o!VE#a$`@@;!t53mV3dpgE5ph4-hKUgz> z$t+4y3M|g?80U`53{joi3Pc`vO@M{)tSad$lXm{fJ6Q!@Y`A2eSxfUQB0VmHTZnzL 
zK!LJ;7Y&N08>?%Sf-F`Jy%0_%^$YA0o>-$g1u} zi(}G=A{_B6dX9MdyC5-K;DG~r*`hhc>#y3SH2)-9(>mPRQroaGIQG3OSU!1I9I-w+ zwdv<_SoF3|uE=3X7)xp-Uy8hdni=dJlyO+?(D|=v4dK*gX6jLr6>HohotNFQv=D6m zU2hgD6imw*$dlkmXV1JYf~vDMl=4)Fa#sm@sb13n5a~Ch7P0IjSkvU%*|QgUgozwA zDT_hp9-4%b{i9Vr9n$-SooTpSnKIltaP23<+H}O+eI#8br#3lZ0l9D&*FFvw!hR%Y}6X7lq%L{+T)LR5aQ-!^odhn}^V{5uaE_YMvoSv!Zh>r?9 zn_k42V=u)02Iue4%sn@jQD2ert9pBuY`w;y+;s9zBFr^a8Z@WKbL>soiK}~1%9`9g!RcM0|W-F zr%j3MX;oOf0WhtL13gpBG@fy`fC7V4!Ll+eF9=X9sr$mZ)7T=vnOcu%OxRDgJTpp; zkcPj4%2w{BASc!5z$yVf2o#{hc%yEz+SELb7dag}NSArG_vxp>=^T77H6A4b{b&QH)jTAfk#)C#ZqZv60nx{ffXQZTy_^hEtk+giirHuunBS`?@uD5 zfIsMgJ6B`^Ur%B4{R1C6a%p`hK4-nEW%lM8mqv^1ngbg_5sf&G#n)992V)$0IbEeL zl+i#U%fNRZsLD2|%8Qak#E$aT6k6lsTcGx;69K<_s|O60%-#tZ`i6vauS~jxy>+tv zkG?a*F0N-T{0M3@m9Hp$ia-?k>7L_PLicH+g3cTdKeFxv$&*1Hn;rWMYjKG&e9`)U zm7Mw|j|CWfk(5S@!JpyfJTGqc9mxRV-PNn_cHKPp_Y2i0(w z@fc6$ZGs9f0KyU~kjaB@XIi6FlQH+X%3{Mq$O_~QeIttket!s3UYEUVqPae6K>rg^ zwQI@QurxFmXV(ov1~2BeHWL@Cn&&JWkHXb6`D6R+9zL&Uk_NCk&Hk=`Uo#th2|xQO zsBENv)7v=$Hpn%uT(fIjnT%Y~xW9WZ#nL03Fj9662-rBN^D6^ABHrsMDch~%CVvZ8=d(({{#Ps&N!^mjaSs=VwaRVf&sh_8+EyPm zJ)M7_RQUz?!Zif9L*b>*B3~vqBSy7}I;A zx|^~%8OY`1I15NTaoAD4kY~WDi78ifdELZ#&6HUnuLQBQHD_w91N>_D zokhwbMf6|^%;sO&R);%lM}GaCPQqEPSQY5N-FzCw1{n4p*d8`U7zE`DIiS60!O&UT z5&H7#ZhhxMu26QPrBci3M$TALO@<0;K~l~|)KCL-GFVu{n4$}HWz-OC`dK2UnDL5c zxQ8q?KlDo6)hAH%9!T8R?NYtg!Yn4%#?4S>Y2fv}S14N#tE`UvZKN$mpGWG240YAE zif5m*8iTp^-^#Jaww1W+NNWGjkcP&G?-v>{3bmmBkkAded>V2#6Qjr58_d%T; z_{X~7#{m59w>BDsF$AQYL^Ig%eyht9%7|7T)VY0u>jzpY_50d6j0dRm-0%F^VqUZ$ z&h;*cM_fP>Uo-O`B%x$3U#7T>t8NZuiyIn+f}o~uFikpq)CINU3ZE(+uBEuPNwAm@ zX6CVs9(m7DoQMPjhy5XxT*Qu+QA(aCpS}083Za<8_Er>HN(Xbjk-7g-CpNMROl&gQ zE!Ok4H3jED-}t|j>123c)9iAL?!qa{;S(;B1L9&QnRB8zrBFtM9!TRudL4(3Xe&83 zLWd;e)^1^BJ2eZB2Y{RRV@+*&ZS$&=+`qlBVuu<1M#*?hfa`)&|KK}Iqi#;z2iyqaPHUa zq*rns*oId4i73xdU&}oL{{zAziR$~a<+5C(ys-q$-n!e1ZlFDp+NlB8z0yMK3xjYj*osq6;Ok^2f;VqER(UFJ%@~QZof$zN6L*` z<11uST1JHXWRMBxO-ct+Ud&Yze|y~2t+iV_pykigveK@laH5VJ%UqOuq7F-EJ6+5I 
zu|n9x1;Rp&egkve9`f$KV2vn|43Ny%3}38Iz{^b*x*D`=-%ief;yq;I`@QttM5Q?h z3-kFi`7U6+y3Ft*Lcy+ z2&LVIWw;Qo#%$Gm<$-3B>j*m}?kOADPHZzdezH~YdmYjvEOD1V?>Djd61B~>80*D= zF`l3E3?ffkxe6(AZS(K}Iv-c~6Kzf9h$=x|61H&!+7LG5U9Q9np|m|tIij+#+0+*Y z{F=S3tf_cd0#6`_l@?ulbBV*ap5y%zPqweW6&b6Bm7!2Tu z5wkg__T5w2)L13b4b9}#M=?cFH(5GAAWY9k*IJ$({#nhaeb1zshjgcg&!OHG32aV7 z-sxa^)*rr&bqA3@t!6Qiyq?J5s$|wF${b+mBxydkfc!SF{K!}Scr@YH_Orj!@Q0f3P)ru% zFDiw>;eJmQ=CoT`Odp5f;g)#-MLD~ z_kEyrnyzZWBF?9JAsL`+0^XF$FlJs0;-Q0!uyK+=FIMUU7Wza#I`@!K-Kx0D5s ztO*owkd+Gvbh%%q!&LXFytf+VP=C%heoCX_(>$(3Ze2a_#Y4TA{-%0?Qqy&KMQ4nD z#AWsacaynH2d66qwVB``peEhtnx(8I137#~SX9lm*PFc4A+-Bkr?TxQSjyXFH`(o% zO)w7U@uiD<3#dji3*2#e$Pjdt+vUhuSQMVchu}w-P^}YC0BZ1NPxrmH=w-i<|FS$vBDUC`F9p)>csFg&B4)b(~=a zD{X1=+5LZajo2HT$nHaHfBKnf9psgmePR-8hjxlZwAonlq~)IGULzbl2CpC|9YoH8 z#oEAP0%~NtKj5O%8-GHt7Y)>Dv7CEnrk9_dW@#+ z)yWhUE9q&c0nRq;FQ0ceGNz0Uw5k-1c$T$|zRZ^Lq8>2YNqLFj9_*|6q3zRT4FD%M z(x))BUgrt($`k37Boteba#5~X$Qr>FLKVF>Sndlt((N-SUjk6LRWAb4v+UYzH4bqP zNWHt6jUD@ajYg#HUi8-EK2%9mr5;i7?^$v951q%hKioG}T;ofbOqy z4w<`bt)3BUyog_(8}ws-O-p8QQ$M-wH>GmFcF#I0v3x@jWTAEV#-fb&Y4~8$*49$p zY&jPyjHNk%Bw(n2*>huczdLk*woPEXHn#MQOkh-M7DK7$(hn6i^~cVbmR;(3V(-!1qN#$F2`!|z(BB%1`s*oLU#DrP zy9u81Bp`r`%S2~;8$0CszcKr>G_A+NbpEw9W{HKG2|MYOhtW|uJof%EL?&5YH0 zAHl?Ni@lAHUgc4$G7cDNVnJL4-T!dSI&_rX1af`IOT|r)ZW9|acF*?ZGWy@|g|DL| zDyX$s2uW)r&Qkr@n;Pa+c`KT#K=bqNrr>zn)@|w@jw8yFI<=`BZA1PY$&pkou&|Kf zoHn19$rw-Sich;(1^Yru)k20?U&W5=j;{{xzhVlU!3^L#d4(sL*rAr*4;dSBA(^*| z3EPlxXUwg#D({%we3p~D4|0y=iyk|wjLbV&N;@rWR74-dc}M3|TL!~@vf5)m+q^jz zJg2-(WBFg!972Mw(LRQL-^0<-J_2SGz;=f7Z~JCo2ALTBmX#H~c#)+X9(Lodo!&fQ z`Cd0%#P4DH1D~1yL&(&YxX4}^c)VqNKtoF~G7AU+?Tg<#_QCYO zZ|-rBb>RY}L@9HF|I{nTuWiHse{xVyUqx8QEU2@(i8I0Sch3l?OM;BH}X4{rb5@4fdv zxBjZBIyR@y-ltdh?zOs4jGBrpIw}z=3=9mqyquH<3=AT47d{^a4!Sv2Q1XC*34xK9 z`l#iTebR07okSkAzn|zY^@FXtSprsD{3Cok(lokfTHoutzTlwr6W5 z&N`r;-4;k4AMlYV-g?})b9HZh@?k#l{P8{LD?Lz19E&A=gvi%PO>mWqm-Swlxh z8~?|fHJWiM-evfc7?sGgYw)vcL1Qr@t$8#h!nb?E(3^J@b)vFn!6ALV!Zg92ua3ce 
z3g%;vgjN{}_;^kZWAZJGDf%?~c&PJ}ys3dR>zOOMV9j9WRLF9(dv2~KL(KzoiGWp8 zVM9ZGJtGM!D=XVk2gWxYoVa?iEnRKxgs~7xLwB_NLjzCko8Y_iH)Mp9;OFx<5su;P z1_&dbSy^o@du^GKiHU)!p(5kA4nnfp#na@U1suh5uivv@-XEG1UZ1;_NWBnfO2i2*87SU6U2-}&_eV-dhofaraMMBEIV1n01n z{9hm88M#3Y1{%}|220Ue0!xyF@cw5HRS>m!M@ef_ibSDsSecpz{js$#UOq z8J|l0&S~NO0sSsWr{87q`W}I}JA;Wk+Aj`UZ7XeMC-lY+>qhRztSn-L55pN^Sl?*g zQIf<&rnXt|X1Jzeog^Efd}Ba$Qz}WJb#_+)_n%BLDy%&-JMy3a#Akup+MHY6-n=j& z8U{KhXEz<%(%TpuQ3Jl_^+dnhl1q!>V2l^{^d?^00by`@1O12G#DwRRku9$XgzglR z-l?bg^q3*pCHcS-oZ8|)F$^zqat6y>skrdJt-rfvDP(t6GyWpsnWD#a>q=}*2-E=; z*;M*oxzR2JRN8bofLjCk6@Li0qb2>}NF@o|%z+3*ti(fpZBw4exc5o*K@Q_efXGph z7y0Mm35VGeLOFRlX|+@=5<^1p)q`UqNwsSx~ng|1DhZ;|%^9$bYAQZDAU)@Pk{b zXhx2P3H9gN^D|@^X;|laNMo|AXbCMsf>I`PasPWNpm^>vdH<}Hrr8XjA?U(`vIR(A zvb3STkydeW;wQn_m;O;_(>Y5id|+$$(a@!8?%1ZX-_vXGw=6UFn{x9#VTqcbH|tH< zwa5`kGnmjdg={57hvt`O$8O*^LS*eFtzkvlBCnw=rWiGbVBc6E)CpEF!Q36m#q+|8 z0}muX?`WjuE2Yv+1L z$;j^Ioa>aNd!!eG)-9{Q%9OzPfy#L>^~93<>@#8ay@@c`9dYOEdFYPHA;C?1_SHNc z#7&3tMP%B#mG5@iL{c+slUzKSm6bK>%f~p^BM*sabHG%0qq&1~dRKdMb4LePL;a{t zhycmtahMs8PkH)BDkK&{#Bj6Yy+IB4-14(%_k2!Exp{%)BimX{u!R;APR@x!=8TFHGu389XQy!=^HeGO{TtJu`T<`-#J%~ct zKw^nz9;dnC5?Ol;+*9-M2cJ?Abzoyez|?jB_1*_j%RO12c0z71pGwwQcCF))~+?s26a`4y%xrU8fC zGRN@8C>1dZ^fb!$3k6nH!TK|x#x;=5d+jcW;~UVXooA|wRM8Nwl^j&clq-3iEzyk& zT778Zz(=7>Xg4i|WJ(Au$T-)wvkJjrQJKY6hnyJOCU_P2wv-D?P(~>jZYaWn-FZ=j zerFfY^L09n2JvSOXqkhPb_LkiC{8LNWwzjJRBco+cZ9^~l}&)i*SUZx_MYFixvaSS z&W}Cz32t_|ewX#9JnU;I=Q8e z-5GX|tNG?k8S|?`;~`L6yzEo`^}^Y*L#}MuZ(9sIc8IZUuHT!pFVDB-m^GO)D&Q3E z*fJrw)EBluBd~m~tKWSF8jstnAq=ufw?BL*qZ;cV@>w2lK-G><%b5GG_PN=a%SVn4 zYt8OK-dMN=4=v~tD&U!1qHQ2py?)BK1!DK1i~S39IS$WDh4=?t?#%_zSwP>v94rONn%1XGjS2_Qi|e5vu>f$>(gvQ3Z4|nw*1~?dajDQ(asZ!taq~1B z*ve>%v`B?v7|Ljf;MjaMNGYEaqHBhUEB3U?cA1`&qwnJ_$#$@&7{?ty2rO($SGJDJ zbC1Nr!$swaFMSt*QErZ6`;D|>c1`odbz5H2Nkpv^^ znkbZ%(}iK#uVL;)A@+~rP(n?8qD=xRqK0fen&vO_*gf7&)-2V{ z*57=qn{26M`!z~VHZzOtV=QAiQ#}uXsTsSy58*31*TND4$_2WL*VR*~wv)Fd0;|xe znnr8bL(%sK-(D5e7&zS}9(EyNZz9L=tAkz09t+hW!K>KUi#pEjy>>+lBHn`V)3yg_ 
zWq#lx4ygJrRo1?9>STrAu2<+ zRlYdlh~Z2*kZ-%cpYnY%4$YWL-$_nRui-f4YWMHg`odb>nqE(coocviwS8pxn_bFi zs&?HCM0w~ve=7B@A@?e7w=~l=93vCEwa zW-W%M5`HtlDaN^5&^{Jwv3J|GeeU)4L{YmtXLO_Ngvn>H_vL3daH|8BLalEu0XZ}- zTGiGEpMi?tFl{bD1nVfhZ6JcTomG|VpP2iTz!4OO4(#t%oH#LKTp^ZbGy{g-uOZ37 zo$E1BE=|AmR$#hskvfrZs+Q+WYgC@LtK<&@# zzgzwl+`}P;n~T5ga=nI{PH5S4A?@Uo9yc1@GEg5R1zTeL>ph%(g&>kmQYoFvU$3D{ z)PstusET`K71k+!nEI7MSIjjx0WWJ*5>d%=pe&&TUhTsx@6Y1jYgdGKs@J*|jY*=b zoySO&-Hqx^kWup6G66-IaDp?OqQ8p!&ze0St}_eeNw4%7erttN?Qu7MUFps!xyt#) zvni(epu%EiFDvb51)LgDA)kb!Ka+Pa@8-L4_p?=Q5Q@#tI!86xaji!L!!i&Ejki%msW`guNkcvr9WGe3R+U`t4db{CAL?7R!tK zszpor9H$+=#<|Mz<4}e~0>i_Ji!Rg@V_{s0>|-gC>f-G;^27;lDQN;S7p59uZwf6o zD91odV4?nITIR&Fi4*H!(gcZ!P++Xn$fA1ifbO^oZdas82jjR+*(n;cqI#9g(&(*B-K3MBGP9aTQdw5@o z5O!lWxJay7;K=Y(wBpErpKD^MumuCJC)9pEFh@lp#aLa=@OCNWg8hY)LLF-%Yx5zM_Kj>!xWpXKT}!cR{q_ z{|qCWv`!a!$*bvGjk&sqGncjmjrxJJGfW8hGXPct7rU3qJtMB3+L$yj$El*EIAD?e zT+C#B3Bl;(%27t&)aE#JA#G;4y?->5)lR9FXsvLZxqa=68urD9I1x6x;4_?mUQgG- z$>_f(tbF|#h`fq1r=t@M=PSq~#Krx-Ed_(ASc$u=WF+|EVZJ!klY*v@A>^`><7HgW z-*SEEgDkZ|oLL+;5?bOgo$exEzK>XHeNRmb=48&x-^c3y7(0KWOim=gC+(Mc0#m9n zCT6&|+wSqyy{QQeq-tZ1UQl+~s_?U6W9gmi9P?nxm1fXn+|hV~um!MGJqT zXNwyY@DmF$_ixsJAJsK{G7O6ZQ*oRF1{0`|RifQ97IQOKineybskLD0Pyu8pu+YNI zB)CdAIbj;RL>wT5Cqp%m{e{|S&i%(v{+TS}-!TcR;N4#F`X%c`r&Q19s>1hSjsBa zZ+)J{g`>f$>r%V+7}xeBi+hIztBID(ka1M+q@w|!Awi)nImbfJHE#Ft2Mu<7Td*<3 zHlv&A!~Q?c)ky5M3zPMlDW0~lDE4qPeE;(c-T2ZTTpbN5hV|zoM1CI8Hlow-&Nu8~SOqD!7#*L|*_At|AU}&#gFWX{-Hb@{ zGP80`o9MfoEPc;m!gS4Mfd4qe2?%PDI9SSA#c`!YY|5q62FX}#_`%zLc$IJ#n?STa zGy`qO+p@n)trLQ_@~NZKPw`1IfxqKoCL*qNrL~n|Fl7e4oBq2iIZuTn)uV2jPB*=Q zZ&xQ*D{nVnO)EQcnb@q*J_J1c=^Oe0-%5rqmYH2IVtBoiVj}3j^E<{es5XkqzIq)| zCaJ_O_%2HoI^Q~RjUL+hc+k;UUXOo4QMoz}g7$2*xM;5{AIaC(`>P?@q7PzkC$R+; zAr#ERE3c+!a*)9iH6)SgjjP>p0(+*hca$LOt!o?6F29^i`WdH4azNhygv44N|1mfx zr@%o5D*(mzvvs>?F)Y2#jI|}J9IpZKF&{*9iO^EuU-8A>m#yDNeh2&HfeXL& zf-mQiz9VaFWJU0t%Xvx_z(yJGW+{hQ}K@6A(t`wUZke-19P zgt1dFx=Ws5M+>18Am+;-EI8MO>r>P2uQ_xxR{oL0@u1B>chU>hu|@{Pu~rx>iH6HwNukY 
zGBX|%dmp3{hOxGY@5tZ&hAEal`4iST;hSl;uwx(nD0JvG?)hPMlPYb&N zi+Bh)u`;`^cu&tD^Kc8!@XIS1tfwvmVKQCqbcS|goWg}?rY+eb&271=?(&*~`UHCe zsu*D3We-`>H|S|MnlI88Q-cbHYLIK9x#%&}pC~ly-)Khn@wzvP=WopTEuJc2%m9(S z6Pt_6bY@p(pDgTx)LGu6I1_la*Q|B-;$z@gbc{tHG>j#Qz@x!v&1MlDbr^s?X}90_ z$R!L(_3B=>^){*RwZiv)U8j~&aK*)j^iJ%f%l;XCp#4U(%8&Ai2VjJb#A#i1aR9$s z+xgGfp{S#E5BVSvc}G1{ci0ALc2Morm?Z@&sb*+|5uxbW%UUS&A$*DWvtu|5%X83O zx~paRLh5uDGUbu&dHUJg#;Q+DpcmL%7j2x}Jl^JB24^~>R8@3A@P&W!mj2$m^z@Q(DIIgL-cFBWzuYy zt};TYsOv?(cPFHAhHqcq{}plx7c$Hus;s0hwRy1$JzJ0gPv^#|9)j3VzVzE#gw|8U zO|d+m){3_K?8xalXqtLI&1}FE%`bfEmmSz8H|}&2+E}=i{dJnWmW&%GgR3nqe?I5s zh$k)WMSDfx-(U}-PFPNu_L5I#z*^_I#Z>CTUKS_Cw7m)AEJIQz2l z^Ygw-{xl6_$K_C(m{etDe_B5q231TBM@kwy#W!oaHy$@e<$lnFk*n?oC zaHPfy)(xhQlcgeIvS^tp)K$ngMMD`YDhNAxGFbtWe=S4Sl5H^&ap$l#MPdK<{VMP= z%AJx-KkZo72l($Agf!ptuh)7gy@MaiAf|;;nkS1_9n!9@tz7?banl6!HL58XJ+x&( z;%9!x4TFzpKdnY1D9rhCsmu48XZn1*G0hJbaHWQ6mfS{xBvA7!mMMDh{N@L$nRH}P z$kh8dgJX~Dl15%q7c_v)2Cf4VU?ioju&glr#iW+|jn}e@1xtKYmVbXmDf1ufnh@Xq zf#DV^%+S}H?KPw2Y+=+VJe^Ym?oX6Ai{WfFwNDKS1AhdKFXfK1WXV~l>(W^hFvIn{ zr$_P=G)VDz&e~RYNZ@V!qqSK6QJF6Tiz+sGmGphYIa(r?5&{)fHd^g4KOiB}YWeuG z3AjM5JpeFl+>yhINEes)VY%TI}5 zIl)j}!BAUq!QG*ZQS~F`e=q=QphpJ4XQ%*%5iI3c;EFX2~L!B1&YG@$DFhA zU$SzQGWCz$bLg|k7_lw-FE0L~A-Or{*S2&yI$8($T7KAWs^l1>QRkoD*tux~6l=|* z(bkM{wHX^L-=Ngre9qAKhvOcuqX_4dq@HaA6ol5a9Iey%YcYKsK6wH8#2<)iJb zR4xh=SPIb9B1uwVV~`)<8sQWJc@p{n)X8;ibwI(^t!p2ZJJO5$?hYJ2Ej;zxh0&rt zvP2PoBekKQGmHN-rP#(Np1!!>voBFcm6hmWa1^zxv?KEgkYP|P*YG!n*hMl?1LM^` zg!4eQ40RziB909H(sV`(q*+4l7E$v^b4S*l;uMWA>j^pTVR2ltMAaPPgKPgTDT+H; zIv{O=G^gi})SB3L;m`NK5!G?eFydLNog8I~3Mu}2x8w=9$<4;OAVsxW$0=w1Sr{mw zUMD4GjemPwO?MUsti(xSwg64}dFlw`b)MiKyxifKlWx}Oso_O(WHCKbeb>HUk0Yo_ z={RFx$XX#%=ELxj3m-Z;ogLf1eLe4b0#C~c*N}=Wx&J72V8yMeW+`o7xG#}n?t=gD zPlDEUb@CgwrF$CkxRTq#yFLvYs=~;zprsjh>`)fk<$TzGu*78OP4{I*qz_P7Q8CkI zFX)O;YOR5&G#2ox2DA=_g9%D(W!X|D-MVgr6Ro8{nY2)I_~~V5XJz4h>0^5SX#LqO z2)mwz9J`Mr?qP2}RC0aO|5!OYTh%&+wWcP}b-#bTK%D|1d1g!emjTDUYM$}KVJ88m zw#Q>@RkHoCo@k3Nc$#-_SaG~bGw!Tp!z 
zwp!MJPzEJ1>+`jZZm`G0&zPRv-6#wcspye;)4)qBPv0#y|4Lb0?g_4NG-b&Od|jTj z{ontC22hAEH>bSVtJb`8(@At)m_FOyhbr;2P^bM{) ziK88+_|a_N{(U|=5=@giQev|r{jm7g)U^9{=tb5O4=)uqZ-jQ#)l&pF(^y~M*ch3L zVk8b|iSqvEpOfd!=f~ZIl1Ru=MVs|R6-%NjI9K#{)O(w93$zcODN56>(FufR)&!Z1#YXASMc4ceb&LLSVcILP}YZyXzm;6?q;r@ zw2NYiAg4Y*6C;lhA~kahC@tAnQk|n+=Kkq^{XUwD1~ufv27e1alS74prsn+mZ$JX0 zItPF$_qO+fP~jPq$5t&J<{aWbZ=) zK2G1ZN6Tq25TQp%K-h0(6n4_$&?u>5j*t6qn&YFS#N(*KEaZ(0H;oKUq@`d&X?WnJ zGHId-RM49E78olcVKqVC%#8mhc>K%szI)5FqELqG@wl-cRDBvu0f>Kve02brBjLg) z?Ov80zR;Bl!q$xsC8xu~)ERBg8L_2JnC94(qige^p~CrIFD0$Sj*Uh{%)~H$mVZEv zdj~6_k#7}nw^e+Z1kj+>d^ zc|sVguI_x3*m}?Yoeu-gNz?DjtdHS}bxi-0X`2W_*Nt?y+9F{Xaiy3|Uyh8fKMOx> zkB)#27-@SCcrXmb@W}o|_LW%<4xNTsK*?S&7qztALE0JN`O%G8yVY2mlUaw_o`^x` zA?X~$EehE|&z;f}r_z&qr+E&fyadXlj61x+llX*x1fYFSGgXX>Mp&4-)l?5sSkAO5 zO;%KeDH2hS!diM9O)c%Gi{mWsTr|nYqs4l^Z=P@Bvc)T(!%9nScYV88Au;er2~ z{6NbQ?4j_E0@W-6^c{EMzwaSi7k_^4$Z!ZZ*L9Jh!7}cp4K3@9CtRNV9wV<=H!xbw zzwPON@{>pQa7g943;6G_CLPfGwi~B9(WZz>j(_E&SW0nhz1#$~ve^UV7P09ldPu@B z{>Q6hF9iiH^>9Bnlt}0GQ$gmW5n-#UW~MGc|NLiQdB@WuB=75oF=NyJfkuyT5scYC z$J?KC+5dm)SN}LR9J)?NO}b$CQjNrIg(b{VFKO!CyiX`IzVA#poizEB{Akt=;>;=D zeWcsJl?oH&+anmEz>JsU1p2$H(X+Q+V1{=7T-8TE(i7vPTSmg09-=82c#i8>VEv|s zj&jaYFgHLNa(R%6cV#uwRX}LU8D|>qBE3WWNQG+^wM5Id=4{pEHU#vjTfYp!G0fTr zYAKcfM;7UXn(#AKy{3JsJ`4ij^WjsUvpg>1$N8o<=PnNsM}K6Kywy`rc~0$kCRe#2 zIgT2ZYt2ZBnG6-Xca>>Cvivy2BF>^8!|p~2oRah3#K!_eKT<^3>j~GC8Hh$j9|>}y zjUL~08l!wGn5`>R8#WovdYUjpNkx8!OQP#oW1pto8tt#HaPvNzYbZUz!*!I5g4rpyP#? 
zktN!hs;^uF2ggTkp9As>C+KmgVt%e&jfn;PrPo)7u@pT_B~V}|5em2*6j##Wpvch} z$Pa$owsBXi6wl3aj^mWLKz7L{;-7vvY&PU07m@xG@ft~2i0MJf`RCI2hbTs*SGzdLsl61g&lnm0mzx%@UAkT7N0q*e-c%^zH8@N;om&b*XOfcN*4KvXD-e}nwQFP0zZSS81XYv z!iOpHAk^eR0^NVhlPR&JLVcrzVpB6`H5FsA?R&*wH5wLXwED=gvEh!DMtE7MVmmK% zSJpk~GzHW5b8M_A=@OdOSg6*&vc9eik@r%MPlll@c&<+~TKytC|Ai*Gh#q#@@(0lj z#V)eO!LQ=lNMe^4(5KeCkXBNJ+@Z@S@pgZ?Z`tVXbwT$tM2gb#k$^)+ODK3Jdg}D` zXk$0A*63@Qy~QX;Xuxc5C!5#kac4Sm{;%ElBxrz5KMtScGwL4>d!Eg2r>emK6d#=W zhGM##XlFwoha(*8G@WN^%j15El_;14jSX3uN@nH+@*~mp#Z@&;CxDDnf+gIy#x=AX z@8&$=Iqu#rUQX^-sPN?4OtmF6x4f{qs;+DN!1s3bG#{T&T8y@UdRo)65Pbb=mAEP* z+IwO9iK=999PV|u+j!jx9DJGM{kmxy+|@$}?By+S&M;HLK0bEt72|SNli{E+uB~A| zcXn9tFiw_wzG5x^H-dd`(X=K1YjY&_*K!XV`ymE8W859Mx>I z&pGzw`}58sH7IYD7EPrpPGt%dwHF)G%x(jS+)5q`d~GWcA{*z~zw1ZMU2;`ZXQnr{4!lzBlGuj9TV8 z+fxkX1T+LeF8a?+1E@pJ;c`NRYxi8BPJR7{)+Sr=VJT+*po2l--R5I&oW1>VxLAk^e~g3gZc+W{s6m zuKaYtu6I2p_AQ2A)}6%}YV2CcFk0s4=XJCfbhK3_C(dRfs7ER6jro9|aQQW`5bEGi z(3O!TgvnaXO8>-+gnk;1#$gUcmr$BT8!TgxXOsUFH}olj2nj_(=2qMPH!&rRd@>Dc z9HsFhRM3O{sk6xk7SwBZ^DuU6r%}&O8BXEnLi3j_tO`u8cCWVQQcgEA8I@#^b`w_d zgnzF;?2Q6%Eh|PJ|2~Cl+$Ul0>tIJ}TBrKl{uf%755-)0Q~65sz|lPgtJT65p>(b< z%4Oy$PcMbyTH1VK9*PJYShG(YzmXMiCKF5ECDX*h^)GRhktA{t{9^O1$i6-0U?{Gj z^Ic<;mqM`v)%}XcxgxJC|*o{Qz@?a%(_EVMdQ7 z+f*8j*qo;^v#l^AwE{cu(j#F4w9#=^f8GPZEHGY)P+NI+hY3Yum_m+Ig-Y?w*T)Sq z<$WfWlTiTJ5O8NPJ>!A-yEpJrd-hum^+y_K0vPKQ3{k>EM`xe6_*rxwZL(V31)yU* z+&z3~$kbQZc%>;s-NoXNO|sW4Q~ex0y{ZZZhq4+RTKQBw|G%ts%)8v~o;-Ljr`3F9 z7#iWx4B02gq&YcXrBWvsWXSIRE&XV^d1WDtpDbuP3gE@qafU}zFvLJbtzD~TyPtQqLtm{&14l`H7g)p|U6agY-wnLe3)VCPU%T@lN8wKTCt;7x`wfL;34 z$`7#ua^ufVCbIThZBtKIn$g%~H6z>f`SCTsiop(Te;U}0Ul3I_+rx@QZAq>*VG;5?p zzQVjY_4j$4bf`Z&OB0&;7KS_rR1OhFeO$yD=$Oe(I`Lu!-PAHSHJj_}&$Ko}As|r4 zih?o^Buy82g#HXIo=YJIUUgZ2!EJ#Jo17x1l^i|L?*(HXh_H3-`(bfn7oigLp!9`C z|HE!cB^#Cmq2V$b)gS_m7DB+)Sch*Z-~;VPiZ3O{4HXs+I-6t*uyNGtnVdB8g?IuM z_~C@VXk))$e_eM{rhoFU74wOMI#0Q+U0Juty%uh?972=W<>|j^PAzxmCK96%AeDZ zJDKH|!>&8nHbH)7l@e^yE z5soXE1PzEfS3$uKBkXx9;1hFl+{R;Pj@=y15jGMr63vvSWVh+uFm+q$Imua 
zMMKS(zXv$(7A$7lA#4HyeJJQ3rIF)gGG={OsWpEAbx-|nA zDDB(VQIJOHom@77(ss&L`u>)IEM)WvFDz1BYywe zUms!2U7_NWh@PhY%<9a->hfddR4-8`h|`$D#+cB)qEgiLpzmsBxL9)}DBw^S|MJlq z0siRt$kxIB-uj@tgji^$bb+aTW=2bFdX+G~l0X%M#L>+RXrdG=Li`T8n7dAY`dCQ24j~m`8U>!%px0tG`IK z@^-XvYeNOGU*_iG>ay44K|yiz&GslO`1H|_=JFym!N+I-fWUI!og0xlKT@PQNwpj? z0s}2gS1aGRDv#*2gXw3@5G4D~<7HB(TsG153utwecq}@C+_t8Xp*>WKy0uO+ItzV|_>rkgAa%lEpggjJ3=O`&L^Z+nGdD?slo1#`)hft;jSYKYQq9;if(GR) z^Rc~A^Yv0zb#Vv*AFp+~PU4N%{DM|BOHH=$9C19OW#e^};A%2s*?%O6;!mniFhcb< zr$=$zU|MR!9AOdlGxLuo8*J77RM* zYu(_VUtFJ=-{9g0#;!X%Tv84p^t#^;_9vtrMx<^AUhb_OAyLd}XWBewmpd9Q^tI?fxyut~OsZxav)4e-n!7 zLzfz((v)RIP-+{h7Y@sC-|6H1v0>h3hYdhRy6^40=bE*4(;_MKhd6z0JdItNObvRPcR%dr|F;A64*6vnx3UGBMvY zYug}Utdo*GUFZ07(k1{Zt69XBgRx+WzNr6wm`0<i!Mz3~+5c(xAq9+Ba{ znhL&er$BUSKO{~ zKJ?K>e{U8!Ya?o-PxB3mn9(?%FLts9uMqTqchUo8E(hHwb2myXtTK|=kSG@UjJUVkZ zi&aI1C1oP9ZD2I4lsXA0J z_q;bwPi>uC95W2`2ET}-5A5ENo}W?zHpxiZefLEho(t>iM-zq5@&b=pDZ;I+vaHZz z!j&eb-p2hgfxBF(SS{qDZAN4tKTY{7E&^c^D&OsLY%nbytf>Z{U z4Hg|$k+3LLlD0H-QIh7L5YtzfN56-*`Pmp=FNgNToBh!ZesuG&M?!!LDXL!@(RI;E zPbU?f_I+L2R;@LnCQIZVA~G{e@#N)MBPM$qNfc96Myhdg#892u8*gf2g%L+l-+4T* z*~rMvkRih?9M<(FThP@u;dlM(VS0Y>*T@K3I#*g;H2^*OtrT)T+?fW5hzf*B!NLmr zUd>{}A zZ5HIq4nD@eoNmX0=hrj?EzRWFi%y(C++vJJ=UGIUf(+G=4BxY#RK_Wx0UWcL^2wj-L{P zz4tl}S1ioXM92u!Q69$6yS*R&)(&V650E82#s}ZtcDF}rGcXe^~yCf{!4uK zV4%z`sPIkATTzoAzf*v}Y;aeg@&2TbhQc46#u5#l#@f5V{8pHIK(Fn^2f!IAJtMqE z>x3XlTo66Nn)=Yy|M?ShS_Wr20|vWl;?#tB?V?JYxR4&Y*x1y?)m|4+-}5SNcnuc1^G?TJNk~Hc z(o0ArIbj}PXJv0*-)x>=+WY`HMP@9GpcKM$b|$A|_UCDsRp89OZIGE&$3^f*;ukt* zZ)spqhY7{vctK||gZapzTCXET2VX--_4!#meQ~$l?RewhARG_NVQI1eC&4I~-=nOt zy1RgFzl#Bz0-*Kkv&$wC%|C2;t!^P8E*97g%^3LzJa&``lPA;kRZ03odPi-I} z&(5~!pwzqmE{9WIvq?;5=i+=|XLsu-k|Z~Qv89@x=QQ+khV2mdE(|Xs0ZQob2~*7N zHyS@d>H^AV`p2%MQDDa{nbiP}N8k0foCbHJp8R|776i04gI|W4hl;fwtqKMJacv7T z8*B4(Z9RmKvtWN9j&!mVt0_6*@OtU44Kl=E$iQI?KFJmr;R-zc>(_)4C@+J|%c2?0 zhZVqe`pN1?D_1*wU16lm$?{>xBM90`ij8=9#OEzjYF&*xZ754&J;7QXH^mNI)V}j< 
z)i-6-TSZHpWMiqRF0Kxen@F&fRF4`i$x?1C#j?K3OQ619Zqf0%rtSMTp(rjvz^EgA zUYlR=>*!77^f>Jwh$zNe%DQu~G&H-_XbTWEqXKinnH?v|T}fTTD=%srW11x{?lnNf zz8~fj!okh>7^UxiS^PdG#!~5O-%;rt3YA+=Mad8yLaB;?pHxWLuYDHGa*jKXXjcnGkzj_tqWD?^ju^A(%O+A$Dcbtsq2E~ z7HSKYr;-dVNNmL>c8RxqG|BhU^z=u6!32^6UItQ7cBZjO%;2X_^L(-`(C=5}t2~?9 zt#w-!;;KHnAj>&exy3&?&WXJS7;BA6RN4_B;>L`|!AWKs_N&1u@$FO`J!6@(jO>C6{MVAF zj^B-Qc@2KXk6HE~mYenD8He`wR5<$>v(=7#P*3^c(x;ZI?}ebVe7y$afJER#`;*P6 z;zw#gYCoW294fH0W%^ zSmTCwT=Nz+2`{GD*j8QF_Vz@P80bbAiw{-5;_GU|?s6qAUIi;fEu7!@2-kqsv~+d| ze~v2{*+ec9jlUeWk6-+)46}ye|JsHJhjJ^`pTLyV}Zp z9Ii4t+@rLCPtTbxksZw5WY6RKy69l>6e_f^51McDn7x5@MK|k>^g%bJ?4H_+f5KW_ z$otlgR>p>9ES*jVlHNIl4?UAx1|UMeGX7TuS>;eZm7fa&-U!{-Cp0rUggTo-cabQYX&A$reW(~JdnUTh;4UG#@oRY z5LdmEmuz>Tvs@ZGJ9h1S`+PbXj~teqf~{con3_^(E1ZC^-_j8yzo%y;|LK+$Tjvuy z&-u+l(clRup|uG9(xD5y3OL>dp(bl&#gAUpW`W9%!$nf(%)*|Q13vu2hMNk9q`tS8 z_fnQWGIi41{k9FX@&;|^JgQ)V{xF(=Ud?mlLQ5~?L3f8&k20Z2h-pL;j2CSNxhLN2 zxzK6|IZ4*@#aR5HkD4_COtc=Ftj;TBkz7-uG5rU8NTI#X31?+_MHv(O_$vR>HW>h- zyFiu1fR!zhVTPQJtA3er0j=9B_Kfn z>c2lRDIGkqBc_pDtYjMB4Bm~u3VD0Im^DnygV|bV)ni3# zeabXG_%W27QLGB~$fS3UEKH%Q9g#-puanFsTtIsr^QNF z)Bu8g&3hN&^}jE9?Hf`Q(!|=~70w-deMK0rz)4CSyyXxYJZhs(LdhH^yy;jgWEjL{ zCHhWTAMZ34G4+>!EjjvRx#C&=*Ht|C9|}&F%+>umuMtjOiKLPhA~ZQ2T>V+MMGT4} zaUED!`74RyEUW*IsjrM`GwhZ{ixq2ef>YexT}yFycXyZKP^35nD^M)BySqbicY?d? 
z%{ljc=iYz$k*qgs<$3nrGkazN)lV-Qm)vet?04g%7nv99)K3h^OAZ&qlxIhtTZVGuG zuO)Pb?ayz%KAog9m?l)$-?bh%Z#(^1*und%&z)a`H@fR?8dF-7EwFS0;RaAcsJ@u2 z;zOSf3@(UEVO~e>Y`)#2KadFb1V%Yvjz=hyDvG#uh{r<7fn2E#uk} zX5!0DSH$*PX?KIKyu|5kGq-7K`+vj?KrtYQ<5kP_f6w)Fbr;0H%>IEeu{0ZXR7~+z zCNWavvu{aIk3xvhfAHNZWOu(5=U0gcw;}u>C2u}ZP;+&Hj*zh&G3YT#*Vr^JKEkUr z6rO)S$y}Xc&6UAD@)}{n53Bf$t!;8;RmtQ{%#DN>QqSZ~Ly(uepr#g?-@OCUXOk)& zx?p^GR+IlFF(K5-84-c~jfoyaiNu*=?)r>_UOVLQ+msON^=R?I+1f1%J(V8wV$0QEQ%|Z5 z^&>0BP_RlW??T|Ly5!UN8aWO;1-^`)p?VZXs444M!C1v<DvtEPsVI`PK}cefNwW z5G)l3=N~Vs^bGZMu-8;9MmYvCdU|9ysD?TmJmt;38F1>@%4RB1w3my&ra>)zmZmzu zEy?y>i%V1#I=nCWY(CBy%_=DxhnjL$9oJtui&c&7<@m9An^D?V*TD7s{9bqiF&#A4 zH$E}v>T7FhHEv$)u%LjWNx#uX-6b0dt(hxXpZ5kN3P>wkEI>*XDR z5JtuyJfhn7Cz$52R!?1Ze*Pa;Ldw;Vx5V&0t&fW_JXOf#5`@d#6!5lDqC+gYItCs? zWPDaXGScPoxjS&Q6R_pWuG5eV;)`s8DGzH^476vK)uw3JQ%1UQI5BJW^-4)oTKN|Z z@eAUIiWP2&4lmBF0(dysc~ObE6`neP;gEr3HKoG=hgIgWt1t>hAq7cI-M+HSzO;&v zB)7J%h_#PrW0f(ww|~#=M=GW)157T^@ZXM(w5z|k`~29a+ zn;SZ5I-5!nrdjFpi}i!gDdfeRU5;Rv^ zxJJ-9f3f1iyqbM@AB2NHyK^tacGg(YRoXee9t=00)-NCAV1#msG9lksI#o6eVf|4( z_DwspA)N*|_(k^D&l&)|{#SN64rvop;G1#p%Xh_)AD8`Wy~F*dSK@-3{x{R|FKqD=1D^Hy&Yl<3vsoCOd6>g3m0gy zlOru*RbAdPYnAn-MuS!(Ar}+U3HXuEYdIJC0xx6DY+d!?h(hk=A6|~=77*w_;X)~; zcdsGH!pF|d#lE;Xd)0wrDQx0qV~hlSetzWhaEsKm%E=?d%F7;KQ4zxONnY7e(8uLZ zLrYcN-TIAX+re~9D35stZKmF3M4F5+s~%-kOPFOG_C zSwWx6I7?UJ=kie3z*hIBvcm1X9%A0g^?*ySZ+K#@+jqWYwzki${5GR7U9B#RII|T| zYuVh)F%1nI&ub1gcpBC^)BUkjNtz-?MhB#?7fe+Cl_)% zyzg)G9K>YQtz{`QAiF=IxtX}>73Q#RDr8WRFae%-2YC9u@!&7zi~eNo7ej`W_rpOV z?MZU9i^p$EaO^;q)w`(3g=Bk1^q+@DO^Pec@AP8j8>dm}$)5rxO5d0ij3B(aopZtl7UF~e{cQYR} zMhCx$<#h`aWw&27U)<)MH!0jgHkop=i9^=R%t$4m=cBWiyk&1QjUN5YZ^*oU1x;O@ zpxTbkd)$jG#iCh(1Dv#iz>h0%jhW4j*}6P5+1jky!*Rm-lT+wEF+HB^K12h02FT56 zyT7CzZn7R%l4oW{eSHdZg^73fh(B`Xi&9V^Iu%-)L57w9C=|_p9OZ7X3l*LaH`V2} z>MY8Nn=c1G9-((1vtkSY6bjh`A9{)tB>tUp?L8$-kNavm81Mw_#pZ+{tO9qA>ZRO@ zNn5kx{w(4G4ZV8ymYNjrIp)C~o z+Ad!PzmCyt*)#N8^l{K-`me%&%wB}jH1o8koA6G&Ia<-S>FXTI8qz7{iOVAL5TRol 
z6qLo){A>KrqLZ63Oy08Z(Y4>Xe<@KWWTj`u4P_gs2VZ>2ciCTZ{-)gM0xt;m&sFMM{FN1tFC1wC)a^4H02@2+R7e+$r7N1!SaYP)f=GeNEZi)?U_ z&T;s=1jvqB2)74VdRRYsJ>+~(Gt`*t8JR9_B8gE{sB#4KemFmYC@nL~=sVDKH5MZV zi#Ix4>N+ZYER1OZ@s1uJnKG7CmN=lTS1wOnN zLY|MXnklGIym|J|U9jwUs;KB?i0WC2>SuRV2T8$lf~DZR`P7;u%tR#aSySvhMjLgSJZueC7pSUc~6C1?`H$tio~U~%=9^&_?t2A8MS)LYEsX45owC& z>#By@2lqVQ{l)vIO!Z;4*VDCG9d0)Uny%v7QJdgvxVP~4zkkQq-+Ri?b17qUs*05s zN)>3x%axpXh^du($b@roU)r1U=F~4sWUI@wkHC_S)Off8H15uH{O@W2S9sm~ud zHG6v94(sP_!Iw`d?pN1RO9$|MzUS>xnGQ*Zqpem#V9D zbVr9J(`xy?uKyCPvwkX3`YSyYf;a zsn$+#7fi@x@Y&X2k#r~x-Y+?^P%D^@Zi^>$Ap#z}Q^L8s9sR|Sj)Hm@$;c1WJa)Ht z?$B>4?<%MWIK(?!Deh}o`>XVfuD7w=cGvQ{x1HEk62oWYCAg{~Q{1p8nK=yFDO~0Q3B+ml z{+oU1T!mp14u^KODKagA$hAXWQsSne-FoSyBR4vAvs-SDe?I{8?l^mn3v;6MPi2{u z+K%5)3kX9a(b^XGF}y`bElfx1i)e!S2z@)p@LLBT3@e0C;X1Wy988c~=1#ETnZAu& znE`Epto@H+HhU`U2t1ot^TZi6hcQ>~x(JUqwsT9A6ELl4-a=a*ymlsR6a$IYBDZ3N z6H~$Se!F*qz!7**^ceWr?O{Q?lwTV zsc%|+dKz^^ozhs0?b-pW`Re%N;j@(GVcGi?f=`5M&v}6q(>kirjZ56Bb-w&Ie)?0W zC^zgg{A8Zo)qmZ9QLlM+mxX&n^}tamiw8auNdW zs2xepR&KT%bV&le<(>Gj)!`XVuM8cur&jHnUl=|M@`#E;KyX`vpw9&+i+bb12cR7f{L`f{G-#B znJG^~{2S*EJZ{}u3hPBNHFuXTBXg%@*qGmMZ>4xFyxo7bCAo`_DHK` zYv8!xHIqY#hUdD{bpFJ(e@{LIZk@BfI4R;N@ZC(tmjH0?+(<*&2QYn}#&D1FXB${8q7CG9TkSR{wAB ztTx&<-f|S#JuD-Mop# z_JTv?IgIu-|LOMCm(s9f_iqkRD6fqiXyO60HiNcM(BE3}n~vP5!Xp4r7Z*ajg;uT#{XWvIkZIm|8J{S!b*j; z>*>|d8mqPY-`@*zn=e?Vx!0lpYwY;Xp@zKJMu#4QZv5YG_}?!AvAmGTrepBau3-I( zuPHUFb+Eu~%O8%&^vOzCdvb>X{+sZge~ehFk{7%xVa?hXHr<#R?2wzkb~N0+8j_Ub z+mLHEyq;`ocRFan+8EoGuS_vlCp)?V{T3~g;8B=y+dPBhbz?EakS5O5Fvvpv5)KM?)wZIH&G^;pn*PGpo%0|m8V@zpD? 
zPm5Y&)=fKAy958c@TnW)xE98Z2vJ}UGSve=>Z6#OOGOafWIq8smd^5N4*5ogYpF(p zz$k)n->s$n&zO)4(Ez ziXY`4hf9GYX-Y!pvg}U`o3bj#tJYlk2_}jB%>n%9S|f)#j049Y+G+Ka5`u2LsNQH# zsAUGk$M%R7UA9`k;=o7U!dJY!`*5AEAwevlcJ$!;C9F?LK_Q)1K8TN>JR};l;l=hl zq3*<|wpLXYWP?5`zdP|bufE__a()~vg>a;-Iz(~{Or zlcgx;ch+D;&qPdVv&LWHT_Wdw^=&b!qWkw5#g}}pUEbObt0mFgzm-WSeuYHROsR*!3pK*dONtpdv`P-nl zbLy0itD}U4e4mvpe@#$9!NBE^WM>cof$AByw?i%Gs;2Q0)DND-7w6Al#m+2!$R0Nw&Su5TE_XkROYZM415k@ zIwESg1&98J0@^tVa`L~)qVS}bHkI0#-`fOc*mom`5_H)=$Bo{Kd}}KeN1Bv|h);K>x8_S^g?r77Q((wCsfe^RG7f_ur?&{p~B-pg%{%c>Fa*X^$Pk z0dKb^lrD}=aDM#BBy5WbxR=Wm2z3)RIahtFs@Ez!Veo)v=`?gNBFjc#!M>Q5eVf)GsRP1Om(J)&?>GE+++9O8#0+;&0tyi{m$l`Tf5(@`Pea}l%;;ht z!CE0j?#BBDLnu6*7svp zxT~rBBzX2cX;Fdybf^1KjW2uSomZO-kt~?B-TnHI9^rmPLWsWXsC>`0jR0a&JGF0f zsd@;ha>%e88I%g%z8g|}84?G9eAX*H6^L$pviRmb-wbSh#H^)8Eb5l4c4g4Syw02r zH= z-?%_l%N~v{I@vU4Eon_183ndVMbLgz(MX*ue==cyxT7gl(MYUCep@-Ufy=7|VVf5U z^SG`#;C8h`(6WUFyWLm5y5^`H+m#bR1a$YtIK`&&pxQpx2~!(+>Ook>9$82>a3*Od*ctdIwXmx5^}5RGHV zYx*zu=3X=%sa0srh%xZuRcSL*u$TOn zTjtzA&MrJDn69vEkx&^8zWU;`hMbU)-8?G>gE?ZZ3*R`8#xCd6>LOL%On;Ww&(nxO zOUO=5m(p?TJlz&rKPWLFnlK|XAHLM@^q@~rC6`ZeZfCTppAFZzc+?A(^wjhG*JbFh5Aa1cWo=y!HMTQg!t zW%@O2#ptqOH)70BD5eH<@??%OxD%ztfyTi|e0jnr3Ke3X-giKsZg4x;zJqO)%4Z*r zUvp~Fe0LMZm}x3{M1@a*A!Qn6)qm)nPx}U5Y(lFORI)9AOl>Jln)2mJ0?{DpuidcW3 zLQ+oQf||Ia8c}2t@_x~z?K@P;WpZ~C3QGU}qN8enbLoRHNVV~DTr^lKw^Zf4-LN-W zE-kRAqDgOAXDH}%y=9!62M)!v$8O`|fbWzOv}Es}yTO$`Gpu6N1uXy-Yvu;B8U-AA zns~GO#%i2+kg?Rwt3nUQy5@A+B!r7^7}~IplQ&NN`8e=_>Do&SwbmzQ`xBup3Nf4K z`=de06E+fr02F=yTR5>GG7o+tUmXG++cGrw2598>t8467jB%CCyqXBH#)x1rlU}BL zANbv4H^_lZDv4EOcBDaC@mXq!8jLI2CCE)V{`84N(Z=L`nRDD|IOL{$5%M8~T${J^7O!6S%? 
zIB5K-y_>n#P zUyQ`ennGkzW`2-Yti5zhtZ~PgLrfRw2m?yrb6EmvMumgPl5+MN+d~a!tQCRLtxSF& z*(C&_p^VMw*|3F4z6lOD(yZfA5V6?MbURxm*$Zt3t{)2Xd(2>`Mo#KEccW~s=yHoE z0xiN-KgY(QVV1{+b`6+o$uJHnx3ULjHRqGau-1_ zbmRgWMzdOY6(|PV-Ug~qAvrI@L2apU6phr@-0q`oa&aiolJ*~YTmh&@u1zRYeO(cM zC1$4kI7BL&LKhZ|a|@@9-;t2HGVLMa#<+iiHN9K?hS!42s!6v?|5?5O$~7F18%bcJ zv~jxk$gVOv^HeAj%o55Mn%^CAXFDP%QV9m7>gL~MGq-A1j4S2O{Io1Ey1@sosz$db z@G>t|D%Z1zf96aYn@Gnxt#Pb+v*kgE+;w70!Y4-G$I47x^@kM}aJ}Ap@6czAwHK#& zGu?$k3VQm2Rp~I?ujXE)f1sifi7m)LFgiw6>VU_+M(|XHM)R%Q*G}YVXEx7Bq@lR) zAn5wUqr-i;${koUfYG>NG;X53D4eS4Vxfsw_T>|veI;06)|K?KC}%eRRvJT$csHT6 zg~E50z?RMzK{|$6Wj?M5r)dHQs7r(LXct_q-gH;7kP4_FiTKHT#cL|gdha3084!%Q zR({1fOH$X6|9H!+=M{?`j$y>0XLs*h_L6qxTGwF09tPBhajPqBg)gb>@~$netUXQ2 zd=O)I4N@6bxLp4DldA5ObFr})U=s))ux*m2D#Dnu#M*bPaHf zKwWtp6rA@UFH9UOxE4-5=}2%I#f%vr(WOtj0`*pNc6e%r97r6?A7a>58{^Zp1(5|` znYFg(14NpOTZ9-#2>55kV7dT`pw_rXS?DQ(CVoH!pH7y->U!r$Tl`B6O?g(HnDP&~ z>)js9qK1xtMI$+uk8*=H18C&jA3})GGl;zq&k=}W%d;)(%8U*W8#~2>$A922@7ITM~h-fCpi6zP)A)_llEc!}_Oodh}oauCN4bZgDkd=B{e z^-BfujSiyHxbw6V5hm1c6M?p7fv)!UVQOOp%E(VKGcqs}HY(RF zGNM1j8Ypt)WEtuV0vsG|Au=aYn6aAvEeRP7kG_ujiTKlJf+Q7)k6NCk+NJatW=#K= z1hv-7GO}5W%x>yp+Rn#;_6s~zWnyi)x4J2LDc?4HUPWiCz@eh$-qF)QM&Um0{$t_qCrh>YsBQvg zdR7AT3Fe6!wc}nqg!~irLecm!%sA@4nI%w;D#5Ly8JXF7Q}0I)G8ivJGA5s9K|?b&^^qtaFP2&iMb6k2%>fGTPd>-t4G%@lo|sq0*HA&C3Ug>-C^uFd zLV{cs4`LJh=)y|S+J5yEvoytw7DTY*uMQu7rRU{s;_8F^!%mfD`b)~X8|=1J5Vg1CVrLUjLlb*H3X)m!dH?4xnqTq_+E84+aOkKX zZ=|*zAQv++Zi!v!6g4@)uUwA&95up$Miy?xiX-=zoiHPsCk7%W0(c}~v?d;AfT?_TyH={T{3Lo#WpAW4F>rBOQPTSE#j#0TAIn3 zil16nfdCI(dGG2#Ge1;Jfxmn=NSQWeWn}LE+w&HJy$97tZ_uKC-cdM-v9F zu4cVtE1&EfM?QjNAeN7~>Fk+1Cvh%y&JSoYK7tdB;P?pXP4ByA1G_{FbyprcPIvP{ z=-lC(1TS)qF1)Bg%{X+LUqcX*qB`nz?p4KIySfwP2RYsxLWR~kzN-4}lG@}M0k?K) zv$ta4RK{i-@9Mwgc!z&Nz*f>2>)*99})_xUWBTczJ7mmu()7RvYByD*w^Bg=^AXvJ{k}djlH_Ke; zV(!#W`C?IEzLuA0h<}v~ire>vQ++l4?HB1>-E!Xx-$Mgu*Td6Yi__!n_M5i)6Hab` zcJ(E~0LnbH0TKdyk+$8b2nK82l$Njd^jMsSs3~NzxyrBeu`}UHE6dhrI9KigF=;`# 
z3QT=4%Ri*I4SMRWBhwML18!XADW>qAdy~9ab@9oC22M^jb*jXpyeL{D1Co957n;IB zF7nIF!Ni%@<>iAvEkom2|c@f=9&&x#x99C z9iu*QFWM6l3O`N)_1dRhy$p4~Mv%2xsO84Vzux~UvjD;lqaT*S`YL*OcD~HZN07cA zBjDNr9P568illd=el0pYpvL3q&{642D|&*L3lRL6xFaxwMAUDc7~ZcY_+bmahNaaL zr*Cl4K9!J5quODtvw!zG@l_hBZDq?lfP>@aBwk3j6y0*=7NSSm^xfA|mkruwm6Dm2 z(MhdZ^Yw!;D+#tDh>EAHu6}`w(u)pX#RBdRTAoK;;!OMV_=vI)-@u;2D>au|bbBX5 zac-RRALnDXh+OUdwFso4tQxh;(`&DeUCqP>nH?4zdnCg37pL2A#KlRb&{-f5qZ9>+ zeiy$0t3F<-@v!V-2=oWKf#8CgKI5f;n8;!jP0Zr@0#V- zk_jGoTq@~yb0-x79=arP^;#1W@ld}4 zhv^C+Q=6TG2<{&`Tk+%i8xg&hVFtw<4Qhm>aoY5nT?nx$TaYcE9N`gZqY0wE`7@+@!!iF~}3wCiD`9`U<+dKU9nr zGs;7o;z*8ctY5D;3a00>Xaf&=yHSech}KuP-Fecbk!iz#mEi?ZT`yaSd95WzBG?U$ zowpwkyBCFQ_@ULH6M>dWgb)MfDN{I1cnS59!?{xg!*#6~ zDa?cMr1Cy}`zj({NUw$$HJjvOU67P)6MlpYyPk{QZnojU_yiW~mnK9k%l)^~Aye;f zxt;phpZn7aHiEh;^X-@b87R!Ih^&NBI2=}1n$oUsnbh%+bP<6F=wP6fYR2vU>H{Rb zg>5&VmJBeY&l6ArYOfD;GcnXn60bgzczHa~mz?Pge1;n`1tmR$t~Ub$VzKQ0AO>m1 z6Zx12rmZ@FSr%d^#4=Hs4cmQeO)LW3KUJ^{82KG2J#;P8;JihP#rS@fe_6n7$nR{F`jg1t52KS1q0LakQYkhBSs_5K^ z_HQ)G!(iTn0v9~0vD?Fw))%+1Zh4deg>d771mfo=gPf#N#pUEO+y9q3$`7^jw=;pOgj*Tpw9~uH&Gu1H$=*(1@kO^B1o5@v zs6lkW5KwI>?4f1kYZ)hoL4f0`u3cSJPg&p0(}We(pI8fm>&Ss*w&zUV2MxVtqBBz` zmde{IWlX+|>AbsecmFoMr9?DFqllRGE&_|F>ffnQKAU;PM5UcoVqF3nmnCzsYw6$y zmmsUI&Tcg^nNbVJJ2VWF=J|${!io8my6wZdQfu}Sd=VCssNgjw8R!UUF?MZr^;*z6 z)v3Mp4zB^Q@A|IQ)-9yU&*xNoGP>gGTHeIXgt()1L;{6UND-Puqx z$rNc;K$xglCkK&b}JIotGV0Zo@OWaRVR5ag+-IiJ0_<*hEs`;UBQJnx90yX?{AVs z+`)l?gF=LnhW0OP8{4i%SCEenFVg4o)znp$R_$XxZG_a%Bnfi+XH$#E{fwNxO=15Z z^LKey(}%ZNG*0%Y?(*s>U0yG-fsc=MqBs5h$&`7-X(v?yPI(}X=4ya9A0M&&-?Hzp zzPf}{fFlq7(+{_!G5L&|Wpj$q!$Q0;L^A!!d%&U3ZojeQEXij6x39fwai>Ne@wiPNatT=<1qXUT{>%A0MaGw z%AEUZ`5UB{X(lrp#_nX?UiQzOpJ(LZdqFS0!qt2^Kr5C(9FbXm4X`-PgHwu`$<5{` zVz^rOWBWZR<37J{@7^$U?=i{9xao6?Rm-Ciy`^OJywq7)1R8AxMUQ0gj)T$LI6EM2 znSxS-g1?Sty>8Uw(1PAxA5W$c=8_$PECPZ}BD8lc>ka#bocp3}&14?91rl`rfks-R zAV55XjCgav-T2#@H?rcjtv1zS8i9$@fiy8OG)vpHs;{fPprWZwaLLHTDDFBnCoAr5 z-hDxoo1-5yNPhLV$a=p=L|S|u(y!Vbiu1skKyeQ6ckByB 
z`AS?x%|?PfO~p#z;RVCy35Zd8StYKz*%(r(|L)`+M&!^oa0+nZr0RJbzsVy7c>d z!PNOJYNw0U#f(1@%uvgzN7br7WD>rMP&ADSWj#2_*I#lLnC9T>4Ru{obob=TZZxA~ zZ)moz`DBBquy%FM6c_UFDO`1gTv^S+WV%cm9S(NGu**|cUJfAjdtFX@dkTjk-HR0= zU8Rj?VQxVL(JP1f%4e0(20pH9l^X+S7$nf8>L4$Mh!vJsGm>X%N?KO$R{zc{{E>8m zPHgyk{pok@kOJ{r-A(buCSj-Bk=#~2_+O!iSrXZy+X|UP=bEnPkFL zBfnE4Pk(oB9)gdEcp(y!TsQ%AwSDK=N9N6dj)qQ)K-<|01Vs$$ylN~gODSpz4q)W(?E)WZ1o?GQJMoh_YU)Iy-L87p(--y2#ApI-v)nwKY0Rq0 z1>JwtHf2nE&g1V=kk3xrvw1&V1bPf$#4AAl$fmij#@T|^@9M_3%!5wug2OR8k5u50 z(C_ju$zlJtd()hTsaeE|s%g5W`A#CP3~9^uPGGHH=3JhY@|+C@6ntM!nee zI}C&WtWlWPvz1LN|B!%QMKXOViT<`2KOoL%%} zYSX)*D1YuUflC=ieo+D%!o|F%K|(FEbfLn_egwV_W@w^cY_6`{&c6+r#rBApOInwd zK0-1@(S^H^f0OlXBwPhP^fiG-*Js}eF?Lrsy>-rdfY|%;lo%X3n`TlIJ zbPNgmpRDCoHmXLnA`FS^ybA!7d|W;{gH}0NSno0)gh&2L!Q(gdRWJ+=u3Y@rDvhJ%u|=RF*rhj<4>+ zTc_7{B69Wh8+`F_gLv)#kgj>p4MpdFaqQmQQ2K@0>hf{5w&xaiib~d;4+b4v`E2k6 zELk_{ciwFMi){mzt99=3$7TizmcPQwV4O)Yv!+9rg0{y z99R(7()@cJ_oPP?moNdbHy=mi2omCiWOLW&!`8`FJN?)@fgy-;d25!wDad;x?`x1U zq?S@<1Y5~lp!R#Qet8>uV8%DudFp!J&fJF<&O2$6zu8lmskikc$tVw?LPt=V7m-}f zDf0P#I5DI+i%;vh2ZxyeaiODmaLi9JgnnH8!=0V2v%4Lc81jcy(eZ@II8}uE0H^%} zh21(DvVf+OV%*8+b%$)0z|~9tDDUx+dg9t~lmI6I4goeaN#`MIDvmBjTpoSC0$8i^ z%FkW`1Z3fL_1|US5%ZWL1W#r^E&_eHiK8;2{sg|?6G}fcT-e*Uzcp`~aRmzq;{Y>s zv`%e6j%8KZpC_PtPC0|{8s;@1x`8Ci-MZ~aGI|eh8opC~d3VHJ=JSoGofSqSi=<<| zt>*CKQ5uLpi~G;b_WsBF`!`eaH8BIMZ2ehS1TO9JOLAtj z)>U4g;|DTTe}Z>v9xoqnI~xx?tJWYS9#k@#!Vg{|4=&J?3rXDH6fqo3)|E z3%`H|Om$8V7umsk&$qqTBUOY4R0Rd*ur#uRlA2P??}rtgKdUM?KQURV%B$OJFWAtEj67m^rrPTjy@H`|jWSo3hT0OhiELT?h&hDrXho zG$$*&fUr`dM1e#8#KB%c?%MEU3?WIiZtyE#&5J;+dWuD4GnFL?di;f>?U5Cz%Mbih z`%8xU#y>eZ*oJ-c5j6`VatTg*Ty zPtY)8c?+xyzx`afRzWTi$097FgVsB zv(cybB?beC5>fSVFF!E}3pX>lE@x!%*D0$8D#}FHA2njSe~o;{J4i`azd=F9Dge`4 zFCC1yjGtGyt$)+Lctja$(@I4@dhE?RaaO2n$z4-T+?o(-{cUS{-@~1A9tE)YVrS>) zm*~n%OQy6nL@nk&HK<6m($rPa;_1bZl%g5`L^gmYd`^NUKVg*i73RC!`T1-fWY#6R zYY_D1-{^9KzwzjU*=mkD;_b88@}JU(U*wy9<;$`&Aj8zIvM~RKvH5=E7{LcTp&yG7pS>{bsF15QW>Q6l 
zGcE}{v@l=huL20SHu<}`!65hQoP{BZ6i?j0Fju0Yp_vxXzS5HI9c{7C_H0ycY4PNi zmvH;$wi2P4XvG`t|8rDVz#{#;TYCE|Ymp$)ZHe|99@*sY1lbksQ`y}vL=m6;je>zf z7O)EeP6*{)tODR@Ww&-&fXOoXsujEmvjiVN$?>xPHl&16ZOr<6IjnGGIqY9hRsSyZ z(^q6D5lvHl?ddLnyr1_6s|W@Pd9X~vUt9Ft`Z*0v1UZO3=K)P<*N%%Zv!bHXuCnwu z*1m;fAXUnCHrn@65KZo;_mYx2+4ZD}1waxC!cYRU^|#g(a16`WkBOF6|4l1tJ45KQ zS!PG_V*O3^211M91-@@n2Vh*DL3xO3YhvRaf_=h4(c-=+S2x zmu2dL#o;-10*YwM{lUKNoDnQC7}q{b>3U9$SvfZ+cYxl4YG^A_YH%zZmTfd^qoqSr(WMB3n?=V_dr(YAd$_?MHsY zK)b4<{Ng5g5YOp@k*3ws(raB5)>FpeS)1_)G?L&tsS;_fDiu1snT(*&B)%0JXC07q zk!pRvly!B7WK>$)`mc8n<5_-nS4VmDCqiw2>h^sTN2x<;ACpCd7TA1%@YcFFd4l#3 zonr)5(Vam^aY*d9WqKu{g9N77?+K*@ zFl(y=;WOTH%`DN?5fSSdSD9y&k+xv1j$3u2Bz-{;b)(es6 zbP(@Sk1yx(ad15xI7*RyX1L~?^`p?!Mb?%FJPZzxlJHium||rKuS}l#Yl$_6uJW{{H}VL5jX|%n_g6x*v}x z*un+Wbmr;k_<^=2kKJK++ReiLW`fewk+HUhnzp*?n_hP+Bbl5@f++57Yodfc92_B< zicCkB5U1Vdu-jBsjm8rIb87~4dUmq`MGiY5#BLKXW0cq^0yF4Z5c!kcT^_sZ$>`W8 z*KQ%qF-@Ql3WSimPx=wh!A8`q=@a|A>dVUp#-}#n zYBuebO?dJE1cV}JIOJn9v-?^b4tKQeX=xakm`J449+v~=J*Qln7{ERaZ}hrm@JcmD`v#yO7^)`8UCj+azxP>a zYIJf2G2Py&+L{c7mF1;x96#JqUzf_HC0U|?R+X0gZ@>EK&EBV1A3l*XQlZWuh%{Ah zudn^H*H2|t-6jfz>Q6?-FMf4SsqQMqsbK1jm^Hq>yc$;b}0;vO>4*H94KpCH1;;4F-}x7Q-!GN z>&K6rJ$f*ePE!bMg7Ds#Hy#Z>D^9J90No^~km|(ac6(foR9YUJo`Z&qPK-63Qp9sl z$0v{N?=1Cs?KUwZNh`;2%m70JW0MGBld>OUq@*)5OG|s38v-7W8DeHWiUnR*6bOLZ z>A)Cs9LHg->pBEf)2U4q8O?ps$aBi#M0X^xtw5NWpO!QK@bQfdGb3z*Q0n&{=xjf6 zpsTjB{J($vMK}^gxq#cei@Win6ard|l+fJU@)i(a#S$plq==L87%|yC%!>nRR&9YB zi%B5VP+189Tv&?w-7e_$m=Qq;Lfzv|q@}bhdkr(zk=68q6C6XL2%&Hy`Cxby+3kh< z4Yuugzl0D<+3aU`i(l7oe0WEVuR~4=!K!GAg679yVs-(tLkCD9#aPZNnM_9DIhWmL zSb+tf;4!}xjb~D6EQ&hg3Wf+pk_ken%Wh-lIC~JG@_-*vG&VgeDGEf;=n>G7D9$Y{ z&xMye8fr^R{6Hs&@qPz=ZkN*$j>RYE7NAYk&HnTJ?DBFfnew^a#k9aMy$8mpj7g^x zGeRtrNX{-r+v@59UXMwhwF^))@d1#5u?a>c)_kTqd5JTaL8V*OYkNg^lNe^fxkf zLCV@$-;tc26P9Gh0@SvMCRow)l{ywh_1rX4>nqC%0Sn=1iQi{f|Eqeg#geIvob@@~ zyu+>)B&wLJ3`Tf98tok!#}0?C=VPh?Ks(oSkXI@~sUUDui;Kxb5(=Eo4(T9-BQvv_ zM#}s?f#+maWE5gl=Jx>r$ERkqstQbPfjSh0sfDHaNTj*8sx06eFcv+-2@H5WE{8pq 
zOis=&m`g6n)#ZZygLeotCqz zoCVAw=R|=iatvDzo$0!MWMF5Ow-7GI8ss?U~7x%WJ99q%kgAZRReA}w%bfz49IFaDVoN7cB&{E#4xLA zl)bE`>y!Y2=h!MpIi4@``>75_Lo;U8+}!lH*(PV^Be8fzNwC!KnHbjr5DMRi6=c8@ z3ri!DQ=GAI>3Lm3&?cHw5A%F%IXWDgZm6!@+uHb`Zva535hA*sEj3jX(1D35HJinv zxbvt55DfTF9q8)W*QsgR<=gj0LNmYk{`r}u$h%)$|H*5oUhg@0q_h3Wm z%{ZDf$M-c4U%yhUn4QJ)HUTAen`pP$R832yQ*@b0QXXZu=irEPS?%EI7C`@A5R2m_ZbBpO@3UJ79OLYBy|IlCl z@U6D`+H*$_{`1G56^dsw%g*T67F4Ws<&W)keSI|0x?jQ)pGV5dC-(0>cl;ow08!JQ zGK@Qo)u1Z%djcN!+~P7=siKij$&JS2I?>l%q3*^jSn28WuXkTUE!(x(-nqcAL#6@|vop(wWsH1@=cgmCjXPULzsqUlJNmWoSW+0GmV^ zRa2dUfE}XAg|iE3G3qsv$VhapYnsYPs;WCh5j$);Asj+mPy;X<6H>al0YcaO@!Vdq zu57A!7@>gIBgyjFqX*wQb=1)Fm8Kyy^}M{q=XE>7(byUjiq&0?#N(75ffV+R?oQ9? zYP)GUOBKbiIC!1#X9v+l-Cr<;^V0Qby&B^murry}q`szWRUq*^kGj zW@nbe%hBY_QaB!q7cD1N2$1r|uoQmp%8iU9VIg-R2HAbR&+Wp9 z^SE9A>o5PzT=sJT#VpH0(Nu^~O?mLy#8h#XovpuqI}6u~u3N_VVzx08V)(r?^Wpi( z@{Py+;Yif9D^^Z45TGj2BxB)Pz021$o#=$)Fx$=ZTFCNi`kzg_cI-e`Q^Rm*#yopC z(AH|V34`OI7;D_@Ji3diHgjb)bBNQwPF z*-Tc1Iz-X!a;mx-j>W;+;Mk&EB)U#ITbmBHH#byQ`rJ;ND6k_BAjw%Js~YR`a|UIz zvro!=PYQUvFu%MIi5=L}5{bqq7Z%M+V~Pq^8cKPD173GFD}V3wi66gyYR%eWsFsE? 
zt}G3>?Dlvv1$fRpL)3K5v_3RVg9Mm25G=D@r<98M&=QQX%V7f$BvPrByk+yyCzX-1 zs%8^;!|W+`N=PY*#u8w|Y2X^UV$?r6+0)rxTV7UORWZG=1awkeQ`uNm5lg0ehsPk# z??!Jm{rjhn?Qd_s-8XpU-lMtc`O=CqrkOCl9Q~VjKk9C8K6~`QU;W_1U;pmINBzTQ zq&i{YfdgZc1H)tH(A8I0zH#E{kv(l?KHp!z`%!|0Tov|*5XuSs{0-p(`kR>_+g@Im1U(oC)jNK`PWbL zm>Zdzc{Vk(qsuXbke*fIsWfKQOC2^bEz5{y*W7zLJUAW_1>sN6ofgIXf>I=ODQC$Q z37KQom|Ya@f)GolrK|$hHj!0TUDa(i(IyBhi;o$WqpF&&YlU(E5N5Ngt`U*v?IN$F z6v%NTo9$|E`Pmz1eQw9peE80j!L%f6x=skGDh(d!Xoe`4K3XKy*fg!k%Uk$Nn~DMs zBZx7=h){q9K9xxJJ{xPOF0ZR7Eh!0vBT*~}`7S}a!DOdqD_c1ZdE0H9nnX7lyL?!I z%V84)p?7FxYJQRDcuGmWZB~W=p>$SF$ytC3;*VHN0sE7@~q}&R{(-?Hmg%ju<;_tXR_H9S~!r41BGkb$XBG^^z?z| z)|PGd%B+E2v^AQuBYA;;&_6V{6y`YIoO&yI&LNzVWl2#Ac@WId8%RJrm0pRKdntg( zP?qdEqBsOKU6(c04n>c{o;y^9&}ICcN^-WajsWxPbVX4Sq;^pRh`kNN=GG@C!)H?p2`Z&+a_H z2mz88BCDATfw5*!%q^~2<+lBrR4ZB*XDwJt~}Or z2ABI=@9=0 z1pq~P_CzyJoK+0T9c-EvHFLkj2)UdNLTN@;)=Ym0fu<>{1|c%0|Atg9$XL@7smvC0 znGlM2J~T5swGiId^8c~-UQcozX}(xy9#`I1b(!|w!8AMw!y(8SlEc~Inz^&P(p7{u z_UcNp|A1cgphxLJaV3TBy;7{SqtMmPtafL}vGm{&0AafUbfX*XZM7@!kIPh?JXO`z z)m4oK8US7W0zm@Zr?T>7e)*aFTt{D9%j#+v@j`ENz+y3t-=ByjQjmET837Rd!T&uVK;?Je|u420>FcSj;d; z4=`bvV&zgAJlH@3-(5Th-I|m#Xc5C;MP1q4YzqavcAL>?NM*9{=@Nn9ie-*v8HQ06MU>=9^_XBx)2ErLFX1^3 zu}D;vf+RzPu&O%kw(p-AF&PXW-x~Y)`mKVBRh9P5$LfWX$3_Ob_g!Y1;EA5E*8`TX z6G`b^Aj`&6>3m)?84Y%`ITDE$-31T>gdwQz*~~C#U;l5}@I9z!OfN`UyXGr|DkhnH z!RvC&t*&3cJzBTRe06e{UYG@9MLozgo-j!oqi8l6-#GV*(`r!^y`WFIUnME(skFO8 zER}xm>h(iLL6<>v^S9nw5`bzr`RMZ%ztBI8X_ZMDR9{oW+jR$@EffGI29DD#AyVP% zM36@ufutZyGz40EYf+L_Rbdzg>@EQU0W@&bkgIk0I9BiQPb#LMeItXQC`nXvdTLKs zY5q_otN@BHTpcrG@b$&FbMI??r1%t+2K4bEnd7ar21}N6ak?ubXOG# z0ubU@hG0;TWq`3Bb}31cf@PLPJja22zVbWu7*R!(G{d{rVwE&-{Cq;7OJj~rO;0VY z*sbP}$KBN&=xJ*S`8+QP0hf4qoN?#ugM-nNR^~u!1pJwzQVH1`=9@DdU08^ zeyawAFjo)(0l9+s$?Y*v&vg%Lq_FM-z3PY4ZhQUgh}mdRF})gtl{8(nV_23AZ%6-d z?dH=~)1@B=7!t_BY(WffY!+ELM6`AQU&}R0>V-lOeS%d#WtsIzbZ#}=-_bhU*?R5E zdqbT^J$Bp7(%Rh08szz1l>%rY^1Vk>lM73Bt0m-hcQ*UA4Q=xN$Z5~ybK}$V^}^gr zhqTU(285C#=L-@-$Y2yeqV5H+9$MV&_q}%NC?@z{KK*o1^JR*MyV&BpUX-EB|e 
z9Cb4*^m20gGmv8+&MnNYhGj)9M>g%cQHDTOaW-FYSS{mo3zO4xwRwx)1gq2_6Yk<{ z1b}kZJw25bxlI`3f+zy0vHHqO0qQDXwBrx3sK~Y6hSu3U0OP`#JeZyv>}ct03thW! zfd~zCb;y!DIyFo4$)BZi4Mb>tGrCTz)o46NV>r8J#uyEmT<-4uN08^LE=>s7DMEt; zK9$QYtZoFop7wwrA*M1TMw=MRoE+$}+6-UZeYhD*?CqdVFg97NE{7FD@cNmPZ=5|z zieig!3=06{SoWuHz6J@n^Jw~mFK!@Sc-maT1{(O*;7~=dsymX?mM5~T&|)iWG4br+ z1JV)RyZR+hi+Ozil~W8v*X~awlPR;=bm_z~0LaxlV|h{HSZ1|;jrjT?fMFtuR4ksc zna#az&FiH~#SCp3$8#J6h?F8l_DUp7FxCuC%{ayozZ0SSW4a%T)hv(+KPRJRv0 z8;!L4a$T*DfKsiKgUw36Yp!eJVH{6o+5^C5HX@!Ubvjhks69c79Veep_z)pH#}2f$ zG`ZcfA~Sk_=AsQ%R4a9fwYD7rR}cGbWx#!N3`y-6UZPQ!_U5phatCNv5?C`_Q_m=? zIGco!bSg&(u$qkwgUaF-6M_taU|?lg$z=0oZ$LI*C`gLHF(!kMD~P3g0qwhO77I~P zO=4gGV=)?&$#liUiP>c2IW{L1pkf?LXY|!p2}z}M1Q4^?Sjns~CY-@w;CV?^Gud46^9ZK$ z`9eYFS>YbFUDwXhmwq4KLhVnp#r$ViF1oGOtD_HZK6r#!wu-9td!=+Hn-?Vm z$JtG$R6bv;EFyRo#g`BR&kuCAIW0B?t4Ozg3D}XA2oM!;JZm%vbq(+RL1nNeWD#UI zzVa@u#)ONvopmmQVD)twmGvl}&Q2~a4|TN<_H^DFd(_?5j39cvw4xLw#wb*kR?;^# zFN*nmVIvZ~J2BPdcK!JeUhQrPo*d{KpPYr&t;BXsPovR2drI_ffB8Zxm-RRuZkuf_ zyb0hgS3#++w(k6tLjj-7Y@J^VU%Pu>B>*u9vWWT@k=x$y6RDijYPFduZ`tDmudFFRihbK5sOoA}GHmsrI#RNf6;(}U zvIsz{*$j3I5rfgd^PHp$iy8sY5R#hjxBDs0hGfSVmba6swqVfjaUlqUP2Q!o^_8tC zImAsfVi^F6Wn7AHSy4O;V~iLEG7PS8=kDde`UgZwLI`pK2k9v-uLHr@?Q|UN?apQj zcc*56s@A+sqs%|evpj?t78|COL^pNcKmw8O41S8{6%91-EyOb#zV&8 zPHVxKAf8)WTc258Ie&bpudU_!=tD(SOKE00Fs$7yS7SqxtYq_fzt?3o8>@Ph(*hez zB9qPJ^KOT|J=iooJ;(6HMoTp;)g4fPlwEfLhlm({%?_G=rZA z8l_sd(6xTU65iTA*4x?D9AtS;lB5dxO*3Uw8WCNs5L4Tw*fQm?GDC0ml*^0nfBvPx zz+ntv$@f+?vq`D(B1Ab+JdFFd{Lx~$ZHP)E>r<(8nCcGV8o~XDkkSV1B;e}ubMI|K zF+8F|%o7zylSu+_YtYYeT%k~47)H}|L@^j>@;aS4QB0&WK#x6RP$HelvTBm77W#@^I0?-An0iht%Wxs3w1_hFqwTWyQZ$Cvw+r9c?O!-uOtwG z-OYiu^$k57T^rnD_B$Pls>CzJ!S&$2Os;?lHVM3L>FGab5DIu)n5g?WcuQM;pc!I4 z2_Mxqmvm2EII=z1-P+R{x;i$FRkhrxN!Pgut%_XhI!upRe9k@r1c0O}evi{^v+6bT zbbYMF=V3TD7LDuC(Ry%1B$XTeth|r?v~Kx{Njh#ZqcZt zy$&k^78-Fy-n2fkWR1w-t>uE!ji@m$D*R5dI+e@nh# zf*?xga(Y$jr(VROPYgpsFsN7eaOY976Irq6mnG%D8Z0xtw7L~bc( zbb1zW)rEzN8_Mk3(9N~*#LVJgM=LL|U{4x?p2kov>|YEFhNZ-5{q)h5Ol7k1M5@*2 
z8|rGmKQ#?1>Kziw@pMT-@GH%D>RmI7Q@}M#`fkBml7qFrjJw#9z<&EfeSF69bt@-BoBq0@cCw&f6 z-h%1|htq@_Sh>i>!VU-_MNwkOG(fPkB>-;f)-j~VztGwovRllVTp>mEifW>#5n2Z! zHwa-NpMN;L@aBcn!<}t*vzdYL;rwDQQ=^;i!BLs20})DPv;X|rwY(@nhAkK8uN<0I zjRxg2Sd9O{7_n??ph;D5ERj+x65TbMRg_;jGT^qGZ$6w_4R6r4{CizcC|4-_>go-) zboz9yQjz7pj<&Zho|YBmw_n^?UEh|m3c2d*vj!UYPT{FdrxZl2C|Htx9@mk+E{0>q=N7Z+Or>mq1gb1=>CF;VHZjy|5-ipATT1Fsw% z)a4MCWGu^A(bl9j;A;>1iK^}*U77|(Aoa~Yml$#gMOmJhnz=hVF?xUE-u*}S?oZqs zqkkEDG%v_R;yLD!A>Q+PdXApYS`4`N+O-{xtym4dVz{nSy5#YLN&<$ZZb7Gpdu=w0sy(k_NgD3!Yzg?H$<%}C zIk2as>eH+v0)U)4GGMdXDhBUr_WNRq)Pu*x!Kto+lIcud6kGg0x7VZQ^BSE?(&@pz zCbvt|t?T!5E;&gK>;VRKiBY$@M<-`u@kDo9=-hB0z!=LVI#;!W-`?zR@%xCPR?v)7OUmda32eq`_uD5mN_}t<8s(Hqlw9dCB#!|VVGzvKDQQT8936{ZFf0j%5sS;w*~wIoox_; z`_uDMK}1!zrMd-@&L%Qhlff|9-BzrZ{;XV4@91te7+M29hJjd7RasUgSt^LYXbiYr zM8)xRy6PUU>V#504=`4VOpm9$`-Ech>gnSr2YZ({wtoJ>=ecYS14tzomA|EF(?myh zZDVU}6)9M9{5`?S=4$)nt6-#1V_hIJ>l(Or|T_ zt?L_VF&bO_UIce-C^VV<9;bqFB9#SuED$WKs=lGBsvUU(wGCCYA8N9uR8}29D2d{o zN3&V6(BIW|>c}8bl#(z~2*zyzU%=y{7gbb%W^x5l^w_PQCXbTKYtmm&D#vdfsWQ<7#qerxub+?MZ2|~%DpHyr7GSUK~<6OPR^v#*@3Rs zk^UZ_zn9Kis9&VJH5Bl=@lM@!xI40efQs!ZmWeK}xuQH;SkC71%|7q(-Y!C!zamK- zFKF!DYF^#io?BT(x>B@jngAUzOpM;iKAd07f{@5^e`kAtXPbVmiHdcV3_x^gl3mZ0Ax2$6W=;mn*~81v~jmrow^x*W4htJ{f`QQ(erclumT zLa->wHC~*0C&s2`kMwjL?dvuO!s6;WLNHelN2lj<1rh2sGagP3b+xxNd4KxmYhT`< z*o-HeoVE+ckAyt##kKYO({q(!HUtnA;DRLbEc0K!`_@M{@5NH-_CV966Gu!&;var_ zSCtjSvV?Xpo?KYEk{R(j9q+#O%9r;ilG)r)N88cibYrwZpA!PzJ^#K$0fJLxW ztZ5oGsZPHOh{ocd+#LPM4=%rT;dD!r=fU)RG@UUDyvJti?`Z9BZ~gGr-NnU~UBN|A zlTuVaJ;as4ZsKqg*zx7sjM{+W6+fN1egINMAR`njt4C%ITrLV|`tTblPdR z?}_;&gpgdJU@@Bh=ErY+eCu91m+xv0oIgHnV7WW9)8UP6eeh>mMxYWkmlthD!{5C7 zW_j>ejvo?ZVB6lbL+}NLR;DPsi%Uc5BdA zR)1@J5x^=|4_HwgYI?jv)WL+x$)AGYj!-)@%?wc3XdE zTTg5AumAYv%JLd4txVAcp%QhUSw1r$fQY2h*|#sAwU|uPODiU$aOwDwHhshEL?vuF6IkJZ(mGZ9@j@7 z^tXk&nuCA&=Ib}dAH`DXX0PYsiD9?hHZi|EIlojRQ;yc2Q3~Sdxp5CpW>Q7WbsVYxf_|4t2GecpgA< zZ)z4+#YNHZDvQbZU;O9?@l^W3^!#=rnat&kJb!Ya_rySNCYygSJy#jhPYAJ@O`V~D 
z9$e4yoY5#qve@GHDYUkh?wxzQumWi&2;%vN)3bdY&F6;u|NP3u)+X=N;%X|Fb6U(@ z&B0@R-T(B_)%A@nKsdnh^UG_|cp~6)|M`^*pO20wv)W@9M~=3InpV~~o11FOP)g@e zEQt3eXWB#l3nz}c?Y8h%gk_mnGJSt$o=Qr}cj{~wp(3@M`$7Og9JjC>{$gzW%~wwS zvF$q`&kf+djS z=2ye_r)Dpl9RAUz^B%i>W_8sh2qXPHBYoYObneFeaamH(p5h%U!7_eA0l6|D!)!%k zv&(D!ZLI|2$(3+;V+&Px{y!A7-8S&FPG9wy8e@@_p4R5eCy&mptS+x_B{SKAC|Qig zzV_y}fZt>k#-``)&&<*Y9Rf<_uZ|D&_*{;uxuwVR%i3(~C;alIK^$_og1l;)!QaI`t$F zlf&NM+RQS{e0T#OWZ?M|gMI02CXq}5mN{&91zOYQ)0_8V$*jfGD-xeu4eOl-A%o(n^gn%g z^_`37`a4_u+FNM+FHo>LIk))RFK%heQqjTzLFBMnSj7CN-+l7>nUVkIU;S8?WdM<^ z$af~D-uvttEOkYO5akPn-`%*)^IUgpsH5d=00GM|nN0q(TVu&=_E=B%b~IkIK}B)q zORK-Sdi{GRkM*~=jP&>L9J?8f7H7^rb;o7{&)F>&BX10PJ$k~bEUDQ-KAFmlhG%a- zoLUNRK(^>%KoF)f*^h3H{>kNY10AjX9c?TNb2;%BAAN2&8&3}QnTjctwZ4euoi>}v zU^qxsS_q-dVzC$v^&6jP8usnU$D$;^es1K{k^XZ>2B`}gAO;l*;@W0(JrVJy0!6{ly2L{pi)RfA`n#Dzdg~k|bXnoA})qx5^hK`WwX)sgG}s{_x_Nq3({M?oN({ zsZ9RoAAS~eyH5`FYRgh*A_k6k+N}E=gi1gN?Pd!?Xg@mVxNG+(@_F&fxsh|nhb|r; z*3=yc5rcC1!sll|yPN{e5zK3_^He5BC`aVQxA6 zcxhD>rKheLt{pKYnn?GwxAe8O>OqmZ0=p53etP@<#N1MuCF+4xA&>jt{^T8`SB8T? 
zkyRCwD`!r8|NJR{0nf9mThagS-~C70rwa2d>$F)R0}D)B zw-T0QS)VpUD4EIp?EMekICpZSulq0Ge!bKq9TJQq$<*v}_?h|7i(_1XlCIfkblI#% z<3X4$;aJ9Lu}YFs^$V6kcC!f=`;%3D1A!bny|lU!iM2HOHnw6bn-LnwyIaK2I#uy> zrmHP9(9uRK2x0~@nBeeM?33H0j~AB9VcwelpyC#v_rLzhpHKr>TWMKQFu~t{`w_1tiG=WuryU;qPAG@kz5A8%TX#*x8ZL#dNc6)@IV!JWy+CX?a((7>_& zp0h^>j0RzHZt2m&azT=fMuXF4srPXfF?NfIrZVd}t2C2_V?VwXx5aR(j}Bh5S%tEr;)TaoWOHVtHr3v5q)q?a{d0s zZ57~i!-MZ$xq#_XA_Uhqqkp(@dwg~g)>16ZMx)bW<=NVAQMLPsWeSzNn(t~=Nk`P@8+7&48BMrp3Y=@+nNVE+KYv@iKZ(i(&-yx6Cd6l%@;(SaUt6O z(&2F(?e3Hn_2$H5MOGM|+h?}bq0#gw9K+hp7KxV`#1K;3Nzgz8-zh*ibmkd8hG=UO zyTxL+noYc5;CWe5()nB>oeghA6j_4x>W)4Uv?*MB$PWRSTv&zxa2z+#(VEWYmex0) z3y0E!@~|Siolc*}>9yJ1c55n^izHLwt?2ev1n}HGPEjJsF1OR=unC;@m?+E3>zkq^ zmunspNj6)|T`d8(&E~L}h>2K8yeUX?^CR}i-%QB7X%ky1oa z+gpNNp@7w7*oY?9wxcT>n?AQQ(OX3o!#sF33}aLr(-*o2yaK9 zp9%aTL6gSYgMNfyIgJojWFkS@5BBlWDxml9w0l`IR@KMLtBR}?iJA6PXb<>VmRSsM zWOI2vQIjZ2ODN#6+t#D8jm>RXrcy$h+%CV@6^*CXwzhY?DZ0{3NJmS^=W;YT?23xF zlgaRQWG%b}ITnKIAV#7nLBFrH$>(#~Q`y|=c4T%r9P)X5F6a7obTb+&&Jbgx(a_u4 zEGg>L!jjuz>j(t`PG>5YTaU)3m)9^2lr3Jw)Q>>kvv`!w>EJ!u;L6m(vd3ZT3UJU--im}bwjjq<-Gov1h-7ovJA!_%-DWiz zqp5Txk(^y!mkNa^=JgR-@p)Z#tA&>JM!7;^d1Fga6@;FWD^e@SVX=6e_9my(ZZQfR zpUD>z>CEE##^zR(()q6P=w_p#E!b2}0MYYrik(YoZS5 zf;6|hS{6*jQe=t}3^e)N4v*btGn&(R?Sw9^Zza+hSW(4+NV3=C>TL~Cfu)9{F-;zA zZ25iOfY%j?Cs%9omg3X^P!+$|W3!q$hD8j@=JJ}hfN@Kc&t-V_aHmk#G5hXd9%WlRJn~_*Ln}-b4V-wngev{Fl=|A}b zfH36qG&vn}YwLPVL60VdJ=^g_cyn8ik0>60h`D^Dj3Zm~bqsol2*}ZL^w9 z1_LK#3;9?&yA_Ksu7+tHLZ3Dpg4;TrrR(z_N&@3Ij#e zjOslu1^Q}|_7Q`it)8kvo+tYA0+04Aqb|0AqEgWd39Yt|1Rg6YkR-q`+D!JDIfSZG zo;kg5q%$YQuEkFTEqkW-Cs0&v>2^UlLIe=xl!KrDl0SsP5?v8(`D1}sxk zu<3@7zDM;Q*!8eRIg+Lo^4$LQt`S1oCMvSN`=#cV7%*(j;%Y>u(_qS&TOviE_b}W$Nad1h+L)wKDa*p$(P-L!$;#dh)yKrzR}iQhJT%y$-c2@b=ug{S z@SL_as#>HLMLVRGy^)Lj0YW4Q+Cjh=@f@Zf(fcu$y@lWz=o}Lul?PViyRHK6UBe?(?1>VY5V1;*FXFGOTck}L69o+fMu9Hb&M;hMO*ozzxRpMqA_q8 zupFXg9e|2!vfSFA)%m&pB!YnFHFA*-PAyD%*NWVmc1PFS*J=Mmkl|{~oL~%UrQ7UU z2!Ss>O!k}79x+tT1}A!lWR69;_)t{<(bt)eX@sGw`v`>{P`|6TED)6!syGP7K<`Gw 
z?_y}0vI3DTify65-~8Fz0F%Fe|AVFFRi;jvfI1tBm|6vdp1h$5!ri`By+)D7>u8w7 zj>52~%x6)2(|E3;j)aiLj*7hH3CpElh?MADBU)Ws*#F4yywPsCP~xv+?L3cbETcZpa)2&F872J)1Gy(JQF-vnnL`K%`nzdkC$dx3v)JFF6}3*mCY@N zH}ndm6@zoN2d`RFZRW+-L_(R)pFqY>Pv^!OtY_~s>Wrnt}21UZ-7ARS0$*!L)uFAiGKdYnH~`>Uy3wq}BVO(w;+zGYT^8r_C4yQilLa)FK?5 zS~LdEGMFZV(#1TeBr0!cxs5Hxm}S}i?q1y$tM~gVU3QYHUd{-iWG1`3UhG&~$?Y)> zMTP=XZ{`RgXgcr-Ma2{P#3y%tq%WgU&~p*>N{z)z=3iq*rpYx}n+U5x^fff?eSPic z#u^MwEr=?{EXQ9sK5RFc$EIf1!W)|XQ|B9=^M>w`(WE39Y5lX}S0I7eD~WXR4az6N zAb^AMmeM!xp>sVhrv8zqREKKHg&lrN#WybU7Jjb`Wds4h9r1y94$;oldMH^<1)xZ` z2BDPZkC0Iyweo2zzU4qPgK1GcKuCXo&p>-?K@{&zJ*Fr3WuFG9E6k-*2Bm%9!{z8+ zS&W9YX#DZY+QhlQp4|YhX`UT=K1E=>tGLb!N;;}!mwFBQ zr}^N1S#srpVa4BTVyE4F>8bsnul!wgXN#wwx!xD?Z~yFV1JA1}fr!vDPkJW@LP$kh z20{!5etK#3fBc7E0CX_zWbeyXPWWImf7&mqqMU<|y#LpHvrOMIp6cM#d${J3)?1MM z7Yf&+-X}gxcFfq?gFo>Q5t?lFZ~x*)w4Dr~biUK5WG9}NDk3JM@L+oW-~B)Tf>=~} zBX+h*i$5Yfh3b?F>pLTK@!?Qce=_e+GPJaO#&%KE3b z$MT}cK&1DArV9v5p5LMtPYC2_&sg0r${?0SEb~oRQ=(IgtbB3rp<0Y>D)s-?Z%WDv zzutzO=N7`7fcXZzS_2G3Z=OHZ(&UpQB_|esef9d({1W2%7hBe=fv*WvwL292>mOdp z=S4!`gXy_neRjPV+wxUUZGRw^L7bU3xo8k-0}VVkJbMms0}V9LKm!fz1HIJ3nInUK zw{v1{X>Ms{|N5m3H1Gle;VVZ5tY+hOEV;P85ly5T$&lX>P)4M-1e%WZb|%x=@OETr zV@pv~c;L#24K&cew+syfxPb;5XrO@?2tteZ)T=PpKm#uggr}1~q=SKrjXh z?Qwt*Kn!~7{*Mhb(7?9`jS%Pt8fc(_23{Z#w_k<11{!#2K&9G^4O09L0wDxxX}yD# za%iA|2EN^B7{CoQ&_DwXyg)Qm%?7?EXi(yJ8I3!*1{!Ffj7A7h0}V9LKm!dl&_DwX zG|)f;-w8Af;079K;5h(NeATmPpn(P&Xy94U;NcB4(7<;GjS%PtzU!bOzY!R9SYSl~ zDh3>Tcom8c0KcAl^3_ty-nIQ&E>Z(u3%odL(MNu9w6|yG-(WH?ax-wyw}2Jh_h0bT zs)1#hb}-{!^7np)fbgpnG}Uze4I%Rvef(cOzI|`hz(Mew4d4bDHSmpt-UPC7^L^7U(v>A;{vJCbt=2Pk77!yD+_*%LnD7Abw^CE;06@yaCUF}B@)okNY*12mga)V2M z3s62A_0k$CwW#ICwFm1^%QHYb1%$ky6Iz4L5qe%<^fIrIbcd>nG0@w$!Y42egb*R5 zxVHM!FN@C*0uTee9yiOe8b=tC|eM-g=%Vfwy(Ds3@nTC zk}OFzZo3W%j%6JdQ;9KXbe=5~vIX(*BiCR=0BWbYCL=mDup|Sl@+PCf$O8Zr73U>s zpSh{|JYZRi$%GgL2+2#5qNp!{0Txv@MLB+$&wl&i*rOT5b1&L1Eo}$aI8CZ9(TJ{R_>bK$DEpoW3nz+Py2KLKThW`Agwn=Chn z0`FWrn=Od{^63|;Y)-#}e<_pwmVho7JTuhS-s1oH?>|kY(umuoIT1nxjthC+nntJ@ 
zXWFkMS&nD2c~Jrw69^H*=pR7{sk!`>OJ_&=x<0@6@am1*tjVO7&AG1*I<>U5G{1e} zbTXIyF=^ zvik@EfK|20=lRdydL1GN2vJmp=M6U=jDK=-6tc`=nx7&FRY`j12d{QFH-GTuo!g@i zS(EX3olt$|fhO-?zkLN!gH)z;e(nDFr?)g)F^ExA z_1B+&0IES%zB#kF%m_TjFW3;(7%xy2a(m+O@xIR2&z_iBT+K-me4+e}5Ua`5v&%prJQ{oH;{`Iq`y-ug9;+x|W6VtPR=MLFrTsv%{^cQcv*6j1X_t~|DIeEQ25cn*J%taypa0H#vQKaypkU z!1DSN;<8&qey`PH`ugmMPAz7Wv8%N?mPoNLnObN>q@t+NWRis}#;V)l@VJ~7ld%*j z3W}?(s7+1Izk2g~Ccz-^{D1nx#}B9G5YJJsFfs7_pI*Mu)fVC)8l9f~#qU3XJl|lU z-w5ccC(m;~ICtvgV6Rc&d6oqbDhf$vGG9J?^u^ftPWAzWvZ8D!l8`|F6Su?Wb2-fh z01w{@&)nmVG;Uu)kuH$U%vSof+&^Cin5GYglZWT+QS%|48mXj z;MIZ7_H3?@&gC@wgXozIFX^HmW5V!UIK2K~X5rGvkuygJKfQhn8I3Q9IS@h+LhoES zb84vXgD>x_Zbe>lG>Xp8t!8siTPuK&fiK?$4JAd6B$8Mm01}_e<#jnt!eQzBFd&3< z1e%7s+Sj%sW3vmGklI^9h{tIU`FuvB@bwYUhXRByr#0mDmH12p-xWLu1DF6nRP|_A zSIFNaOA^a5=y`B$NaOO&R!dJyGsB=rG6fJ41patm_m-Y#Sm%zaf$thL9fAM`!I8e6 zBRw6NTz+wN4KU0>KA9_s^ULemLP2SSzC1m2rTpwruft-!`EV*3Pu32cgMejN2*`Fk zmMathK^((4Y}V!`Pq4`|*wy~C-+Y+O<;%CuvZ9KTqAK{M>3;_TgxP$4Zh1AG$x&)~ zd2^!?a%d}&{QF=0Mk9Aw`Qhsq10I*ED#c${fB{etI9}jch8^r|o0wn1gdhlkBD=kw zzV;RsVuAklrOm&ALqfmWaN4YYdgbC!S6jXyPA`VHQwaco+h*-<30^rfGQYOIu(GCK z3PXf8qVfOZpWfF#tg1i${`UeNw~BQZcX*u@%8DwZZ_to=5?XCatn-4R&Jfi03v%dua zfPt4z9OGE-liT;M-WtuwGJQ9)H?J2$1jPKnKyQCXTX-w-Paj@g*@~!`07&@@$G&Ly z@2+XH%-Gb-+2Ot;-JN$HJx*tHh(RyJ#vvr&b`AG-Wb=iGvx@+$ka_9O3<%TN?CjF= z>ShF!m+HgRXn{e?8{7Zk|NWKzgP;EJ4WG+iI-y??Flh@ky?OEMo$;v$bBlU*bj=4P zRh4B4f3>rHu22+JRAlvKowWwOCOE7Pq(|>Gc|6DZx;M8Y9Lw6R77U&PH_^0VQ4IM! 
zR-<7yyq->`0L%K_4y(y{^TDHBHuuc&Ic1Lt(ZvkZ3jr4Yxi=}nKm zD2HdmJ+c~BR4aKNWpW{*pvp|K8iD@9{hp*>OJ`o=$(@zaL@$lCD_7#EH8Lr^SJhYS z6GyaXGTJikvx$15Vli&Ea^-h_M*lm(Kvfh;RdgvrDrQ&{+OQ0>wjKT1Z$Bi2yQ8S0i7YXSS&-`?de>0b~OwLMi8pCrj4KF24)>oK>g#9@eE;0ZUw-_>&cT3y(I{0vEMWg-FVaPQX!by@qFg(4b$~V3sbytj z>u10D5YzZ!xIgLcIiZNAJy(51Pv|Y`xcZyuPu8jnqs>p?wV5NtuzK2OjY~ibG(9_) z7xRV0P)GZ347W1b96<;m)YB3~470f%H~Ixq%MhxfbA2*3&%W-QqHDNQI%MacPvAvQ zI7H=d*>jZ2A1sFpfTF2TZxFiv-{_PXNJSEZ&<|cYeXO^8J0Ab_=QkcqFAz!8)=1#H zT0$3&4pyam>(m11^)y3o1L2cQ-|F3`DG9YGk?7&pu(F5|%{{3}ExSgpqVrl$@5~%y zhGpmmBPy~E?HQwi5RicD_b2W=nB=HFRlY`1jZDkiGKzslI|> zi`nR~n$x-5==5ABonedyToI{J@hn{c1YI@@&$9Dt8;i@U&>)nlbuZOE&qK6(HMR9+ zn5E6Fm90o;(BIh{ygxAonc9rvFwo?zEMGW2Y&9D{yE(SC9YHL6NMT2_1Bkx*2j}P0 z%WKP<+p4O{3f@Pc?_E8e;nf_vlGOK~gEopz=k#>b?|Fh#M^ACsdDA*#?OroZ+cKVug6;ss>_vg;14Gx%4MA% zb+V)tJ!@)!CF4ThP+d{1p`YvjK!93SPc!&IqxsiU zQ3)XoLg$YU7i48@@-dB?IP{zAkx7IASq}KzMw4NFc@5Bw?`{q$Dp`p{uqeT2%3g!? z)Dl{3Q4lzmVF<<;6M_lPb3DhEc6|p}ma(j`4B|Of&_2T=1hJyvJ;^)_!%#*>uv)E^ z<6?>;TgYXl1Z_dIkyUl?^9X9s>pnn&3CFU6z%vNpI`x6HMN?HeZAPDr_7hpDcO#(hFP3El z0LL*4!=^1!F^LgU(S!iTvZ69H^rI#zqJ0ZF zX>6bB7JGLJOB!A6UURHdW36$D)FP{Dic-t|Qlxl7ago#ofoE9;t7_Hw8hJ|+R#cW@ zc6f_M^9s$3uJK}|1YLKose@5Vgei#mxwUnk=lWVhno3Rx!?Gs^dgfQxw&Dqv8aCC_ zM#X7s^Wo`nqmu!9B%ZGSVO5*CAaH~p9;{;hpZc$=K0}l46ura|^*_@?q$vB8-PAU> zVljD+GZ+Mc<0u*EWpyEgv}XvY6Rk&IiuvZp3K2;P27SZb9oby&w_n`4|7Zp>h&7p5 zvk4+Jx48U|A75SFio)G_A7x^pzQ-N?v*uUmciH;x>t{maTCzfF@n?jKBN0`_MB{S2 z!09_xWAcPxNg}FF=NzSTMklSBgICcxEEaa7T;Xt_2S!r_RZYgJl8OtGoJgndJ)E4F zTL6S~G>2HqXLgpQ&QNuKji!p`Z}5u z$?W~P1y!k`T|6x`Jpn<=MJeZE8KUBPsz{liONx@pWV87~JtH4Nx`6u8pchFxm&U55 zGuCW&pns{rQQe7LYx3i_>vrq@w2<59hVaWJ^k&ix5g*CTbhS^+LEd4XSYY^);4~4 z;cS0bOFl1sKKk(C^c<;}lE2B@5%PD1f^NIbYBr^_xs6C{WpnHH7NsJE@f=W%AT`RSFl>!ahjLZKWOMpV^iwY+`d3?}%!t2bOO$JwF2)_|Ym z*v&}n%kjzCr8RBv3a$?!R11>R?HK9nZt;2jUbm#k;jQSS`K5=m3mVthom8oFNyIQm z`g?j?f*z;C?Xc$xg|+SI`0V_`>PA755W}dV*xT8DZn!@$$qa*hE{7;9!+o88j|(xH zv@i<7okx!!%`PI2EnmuFSw3@gs6F5lWf{`^pIo8v$FcEDzEF!}0xU{h9WBSYJDNOh 
zm)#nPr8c9nTT|2F@J6|1qF&a*ZnwVu%4tQx?|pvV>-L@=>}w4+aV)zLjeYrGa&Bc6 zGVHF`q>%;P?JdW8yPBIkR*P9ylw>Nqva$JaZee+2`!Is-Ww?(CX=(Br3_>K8DrB;Z z$yhrok^qP?b?a&`gs44?7)B`+W|mh6I$JCjlig~GMdSM8R7?U@Q5l}UaD2GCHRN?V zQ`y|(#pUY{9%*VNM7s0DVm5vM+({OpPi~KGMPdxcmM=00Ay%XD^|K>JLHOh7!`1Mn z9-d9tob+_I9`ET2`rKBFMONfQGP|<5`EX`oWoy@(>vVYj#BhtxBWgA?WEhmn<-Qml z&xzH=u0iPrSd}E7&wHY`%ja_Xy)J}MG@hJUULBubj3v|MePIv^1pOVMKxfeJa@Z_J zVl*SNoB!cE87Mvskv`vCT;Q=A-GY z&27lDmBgwa3ZKjQ>dB*$B!7789%nS1KRVdc)@(MJGTHpx%IdZIkJ8yJB;?d^e}8++ z^vc?edk=S&gwO~GkMwsBcXuppY+k*6|EsDrdw6JC1;?Eo>@x^_cq{sFeu1(bq6h3j z01(fMqFg_=NV6VEu@e%gNl_Y|OOnNAJvZDR@_7PYcR`dlqp|7b)zQfrtO+<&!BHTF z7$PW1ip}l3a^^&PD8Mjqbu%(CzcfBQ2WhxL`HNVF3Hp7VA%A({u~R>vUW=K8!K`d~7_CN+XT5wCBHn{+yP=`FwtLGx}g=er9O}AOv^cY7vZsUeD!`qv>4kqg(fE7V|4d2fJFD zjYck=$;~dWUAsS#qlqV#Is?IEWphixYI~qbFbHxw3z_FxCr{f-RXsV-=d#)EPCTAp zUPA)E&+YSp#spxnx9ezEhu`b6m`nvpj;B&f;f)6~3tO>xC8N`s|CO`HT~@22S*nm@ z+3k4z`h!VHRpIV2v7)~I%Bhgo^ZDq5@aFcV<3~E0Lr%Lnk z^%!`5BNm%m3Ez1%t?eVnJj3B2Dz;cmKe~99x(SKLY0t}2z~}i_uV13JED)&n^3ko) z>d<`vWmz$p&6iFeYi|xZZI*a4{cv{S&iG@!%-gQu0*qVyzDvi4W2y880iC4<-wuSl%gBl$t~SBi$X%UXRD^%w%(`o7-bEa|_F1jg6OQLI}^Y z!Jxm3@)n29YBC7XL~1J*n_dY&n4Tv9@2ciS53gV_oIBFr9SVBg&S)a_Xnt|*@tmq( zIX_!SDFFhRK(NWpv;0OhI=8ZRXL6>HE9e`l4}Rs;af2W{oSi#&WH8|KgtxYTckMPN z1=K*79XEqo|u^jO!du%UJ2~d zu_3?LedGS4rSSTR;en&QogSB6m6f%KroJw$g#m;?ulw@JW7&NE4>#`S3x&$%C73k1 zov)obCd=wax9%k~nM3B1yaYVE0jx^`huv{vpl>r48=swb(BNmi1jn1%Jbn9N&=cu#XEmQFV}dH>bxmp7vEO&XSiRaFp#pS*r~pt~)X6_c5CI-BKK z?&M(KxnqO9?Je(rb}gF9=#d79Wh0R&&#~u^4V#R{pa1S-g^D`ryI_BO`TU9g-Y*|M z+K$E%diJOlLddbfzPHYw_BXi{N!f}fMOkhOHjVW6^tOin{{0UMqNvMbqNpy1`Og`^+*xr5ZLU=0*5Nr|xRi}{zIe4&&KNLo&lKRw)^7sc77@H>~zSuDnAJn67nf=%x3me4=^`h)q^b;O|Z zJtCIn{_c*qFP!dd38pjoNFu?r?AhUg69c`U-5UMm_NZo%m!ddm!kgl9*xopI>f}(5 zf#u?obA zX%6`MyIPL+b-s7?#@N(su{J67oEv!I^w2;qUzlB4`|;)TMw2m?NI7igfY0649Q?oD z|HD#vvz%2$MXYk}*zmirUND-3?RX-dN^>mh_qa|B_O$qY|Nj5`HF$cZkCg5UpI|&| z!;+fe$_D|2P*qjIYH4ygMMYZIsHKKevGg?1P1d3>N5A=|`ERl!VFmMuqwdC{#Y6zW 
z1T!r2*7q*Fb@BB2Rs=$(v)SL%60}(?zy9o6v8yH^*?b}BarL*iZN?H?(OCKG5khLA zFmz<#^2uWx+tFM>q)|hf0f1%o!inKOxqQJO2wRDGJelG-Ht2Pq8tw^s-T(1lep^Wc zkX~%`wzeGY?a1ath5-g%*p4M`J)FuFs-vJ`@$JZ|qk}&;ZXbzkh z=`AB1+~3if z$`woo;mVog1xfng>UG9s(1SHFA+Mc3b>-BE)ohNZQ^|C;Jro@3=s42T@vG0SP0TG- zsVRUEi^*_uxGx@0&#i{PcY0)~t38#^=5vzEZyD+9jHNO+#~u;_kjB&cnq01j)AQMU zp%^1bWs}une)Z(Bwr2lAcwN7v{F>(!)B_Dx61rLfkbud#MO9V-+Sxu*C&+&np!NjO zJyCn&d-YqMpug$QuUzbG4X6sTMk9UD4# zY_Ov_@WB^1)7tT@?IokGG0FnTVXfcN+iEtjqN+quWJLig23Tc{LVrgq1k7eMru`5V z#f05zzHoeaGaCQk=G}6`R=pVBso`E(A@`sV8l4Z_L}lhR&w0+{pWoxKo8(n>?a9n6Alv2G zl#pYGdM}?kRv+}ol9^aCWs@xDjt&m>c7FE#?VC@YkqRa^|HEao9zWE(5)Ln}Z(Ke# z(%D>>%;pNR;;(ZZ?e1KU#-5B#B1ziSQM8pzr*nC)+v#)IGpWp8hI!?MkYSexYk~tE zExAJB&e$Y)?YeeK$lIrmUp_gEIJ6m!#gZve6q-Xdrw04n_S=8|#kb|yHz>*$(%;tF z6bck%4Ps!Eq}k=w+fOHeMt5lc;x}8mnj2Tbnh_ay(*fSo2val&N`J3X z%StpY;PqTSJ#w_Klj>kI9+Oq2tGRKgtD~d2{^P4RBk=@-kA4YSnoq)WTyI-5HKS<= z>Zm0qjM))>T8nOtXk0g4Yfym+Lt$@Q?v6}5Ewc;9T^(9eBx-0 z&l^joX1zFkDM*Q@Av)XPu}snotbPQkrg{ zZH;w-Kzm~yLP+GeWBuKdAX+5BZMP{3>1}Uu*sTBdZ-1{7Qc8oXH9p@vXGVs4+jWg> z#uBQk_O&(*^>lPJ)_wf#tym(3cur@9Gd%{w`Z}BIAjCY!4fpqmqG%Nbx80%YYF}Hk z-D3Ga{`MoCvWp`~WVNli@z38l(@-0b3tBi9bvf*9O|?gQJHNXh`100$R^<*WHwYux z-O|+6(y*|)(bd-Y!I=}PrYAF5!D{PhZSXoB|Nd`&0|YK7D;E~jH^dG*=!{Ic>MeccYL_3L~0iJ~Bg0EJ|CYspJ2LI@Pa zvDwA{R_{5kvSd-hZnxR3i!e|Ml_bTd~A@XGcyS8EC5ug(F+$4=_fVeE!pGcSfgY z^RmoliU7CM@y?0iq3%vi*Z=zCFIhmv;G?{({{2_qyBv<8p7!N%${ zOA$uzo*6kY(6!I_wvY9}z_nP-<|CBtDs*TU^`FbRuB%;rY`Y z&GlDq-M{yA0`oj!_4(;s9#*J8<5vRz$LqJhz5hs+<>rR^4=$XPHI?qcKQM(7!uT@!ik;_;!Qy`3LkJhd51tZi&!fu|(}Caa20v>#tM{cvLX*5hXy zAwG}u{WB*TYlG*H4*vD(7uy#n#OrZgIxz$g_`|iEkDg7*L`M)wJlECSXtPKdBkd(o z8BDNKo89*37fyR!F3q?gAjJwF)4wzfNdf@}Q6!Q4FMqtU-@t?CFP*sUR+q!3C|W$3 zGK}*5ze^0O&M@>C?o6?QlB#OZ=My9$8cSrd1yBkNU?&HQmNJD)x(`_WV=VcctRWNf*kH-p%iXdojsDJm&@%PW4*h(fJ zPfP<=??5Td$^}UjetqTI?9BY>6GLwuKYFaM>+9POuiqQ(?r8n_<%@0gHIm&bvrYCI<)Ah@zhh0|t zUw!<=@@B-e2F?u)$f^n`MS}2jdiKOXPi?^4*Vc09!Q<_j9F*3vOlCL|do(!*4s6)| 
zq1KDkYrNWH2s|f=B0wOLNK#!#h&QXgD}jY6rTeKBLmSX?%wxCrYjy*@cY36wseW#C z?bGjWN0Mm_VP|vwc}*3uA)rV>~0jp~|)N+ESZX+cpHZTERM3TG->pIVI7mp5V8UefGYZM@)GI?&d z)`WcC#kGwekp8Sb77Vtx1bv>TGjr2RE0E(}IsI!|L`}7!3&##+<@_JNzB4_ytP)m{ zEAWFIZH5(&(XOTdh|z!j^ed6)HANZdYQKEyxT2{01#VTMB{S)>Lx(oDV*l%3{arer z6EJt-=-{#bu2V<)N2lkb@i=0AE4r!$e4h8upSDSsdrv36zWXq*D3U0AaOT9xL;YHD zru{x-F}Go7D0TZE%*PNo?%lH^$NPI{mzF=jc|RPBQwm&m>)R)f4R&@YiuPYVx$?5I z#q9b{Wpn@TSHCw+qvQSEZ=X6oHZ%W+?{71VD??>hCMSxIgiu~l-xxWvvJv^;{_5}2 zxtzdp7l)4=?ddo(H1K$4J|0i*lIMibf~-IY-amhWgXlm0_LFcdK?o6f{^Iaqo#^Hp zQ%&pbX#MG>vxI<8zPtTkVp`Tz4&wgy=F6vt|NPR~&3JNQWeu8!E2SKV*CWxdZ$F$_ zUM;Aqrs){M`kKIx&YXC2WGI`LzrKD4TORPw z-aB_fk1iy zZ)t4G;t%hKHib^z8?ai)5*I2UXY7`Fvknb9-am*LNN~n4Do%Z=kEW!RK}Z zR>u-cB=0|)x^&{`v7XLn)AQvmrT|D5sk^B`Rtk@%XXOLx0skqj*KmL}t*xoCr>%8t zc6N1R6BJ8c>FbosuzFmS%4AZ>Bv3WK$88s-XLF0Qa|@s#_p~)53@7Fm7Z(hPXbL{2}j!M>Sk9~=T_E%EKe@4of$e*<8=Xc;Ei)GE5H2gsv${r zy;x-gL1HU*mE-Dz{8xFHa9i|{8O_FJ0&dS1%7^M<)g2^2ZR93B{n19*= zez)u4^v)C2Fe7=y+Xqu^1a>h&qBf4px*M-j?T9Jwq^)HJe`RrQxJ1zl7|$F9>K@o-T4SU z{)MHLvN1oix*kiUYXkn~U|?!-xfmN(l*Wd-#+qO%lO3B~1m(~;(Hm-vU04lo-gz>S z&1Man>lHMwhCLNeCgY4-+%6Yosh#SC2SAXdw?>W%0)PGC(~rKnuE}x{jodh)F^|)s zo0_<2d1U|klPf!93xJ78^gBtc^?N%T8$2FYJe5IspBNu3)!OX+kU~nRz;hGxOPWTT zLV>lGMwiuoZ(>r<6!4-L!aOPo7}}fByD`OT&k! zmsaw!Vg|oN%&$cvv&*aLRC;=8?ZUCad|r7tF_X#VAJ5FcfBuxyZgbl0o6#76h(q}F zkzR`^-g!Fl>36po>JLh2+ONO57Vx_2gMp*n?N@F;*!yz2PQ7ktK~etGKYY5hyb8?b zBT9d{xrGsef&d{*C)4w*>n(M`&ZfHi&n9&!oy-`8sZg9T_?aWeWvBwqtoDU)cUCMn#{q$_-7rpc2pQ;25guZEek+Ee(lG_M@xU z7nfE6&jA3VcOq8F^4^(|!#!>HpH0To8B_+aVC4X^rvC2g^@Z65AQ~t1^IP|QZr9P? 
z&XWV(OKThEUO>(hP`>7^$^?W^QPj;y)Fft3mJx!PeBRJpoLS2XjSVf9#86(~E{zOXCE?rq zPyYDbEu9%Bl#m;bo(dfI;Tva67;tSTL;{3RBYM#5&gJC)_`5%DtVcj8#{4&%TM)wC zQ_$FdYDCY=DumExm43ih8Ww2NT`uQPPp6`3H%FgQLSAd;DywK{4EaS~oS9vCFgB&C zs$mi71Q5Eh6~p`3htXKV_&-HS)P^7i2X-|=4x_ZD{r2kh$VS*ODpc*$+xOd>>-{cQ zt=}Du#fmXV2#$2O`(4h(jqtC({!Yo|0f&<@?RVc?|8GCMTo(uen%ws$bQhoRCJ9y5 z{(+vsj@GrU$lrf?ZEbVQtOQOZlmBq_y4UF#=xiD2YR5>Cy}V}7CP?XJR9OznkrW)LdTtfiC5ETmQ@4mSn4Q~N~2detnO+!I@9gdLK9gin=-v=V1 zlaSw&%4Gk;?>^azM2qiB>0gB-hPG#hu^@?;jt>bO{_^(2FK*l=uo$8H?*6Dn5Z*g? z;?nV<|GXY9MxJ3WJ6!%9dCp>UB9kID4|UwfV;A(}H5SrbH(JpSQoG zS<|#DcOO1`G7cmQAo_RrMqLiunWF7mrO?WF_+|We&_uqW)aXSuowT({CPE5_2 zA{`;5>gux@cJUPwO(q;R>*L9}L?V$;wY5mJITWh#dKQg~8}hu5<-;~(iQj(p-IgIz zI6%SG>vx@Y+xeq|$9g-SOwZ;N1yvS(vtmOYacvECS8hMJa^o&22ASyU<`ypqkmJne z>Z#?`Q-l4@wKYMndo!Ly3}Z!?<({s#0E2^>TV03z>%!XoY1S(`z>pA&)p~MpAeYUJ zPR~3I6`2AmRG1NzQZt)x{6l4VLRRXzuo~Wq$3p?X-{ss0Z<+o-D}?6f zmR7&G_2AFmIQ7oy<3uNyPad_3;%~pdJGZ#{%0unVY-C4kQ$tNKlP=tOIw9wBoYh(u zc9haYIs+LmnsL_d<_3!-Ev&B380^TKwhNJ@>BW_`jYv}{(A(0qw!Q(&Nni>E0gumz zXBJnG-O4;yL^OS6W3#)t(P6bf2Gv-CDFV>2fzX{N#wfy z9CrY#K?VWx9029#+>8hUFABV>Y6V$-j`Bc&5Zc?`>~=XILOjMnk9)AI-Q}>Uiu%oi zC;40si6SkTs1h$eS)N;5T}L*P6*1BEm5p#~L&$Ekl)`Ng@;I4Hj?XV#JbvUzPv_*q zQrTqeY&2|?r*rcQtLw$tFwF8ZXbv=o-D@I8DqCY_ zX?ogPY6IRxDsy{mLQ@n@k}7+WPK;$BX7IX!e29Kheo*|%Qc6mLKPT4G0pUV-C zr$F`QRSF2$zh{YGV8c0WV0Q=>bCo^@7sc($G;Cip5i3 z-g=-VQ$RFNCw#B;rP^We^+~BeNDi(wzh#pk<~xjJDVHxiu!DBfvT$M)f~j1 zue~^gL=Mv(CTtZz0Y%eaZsHodhR1G02r|z^F}*TMsoiSr8R#)VJ~_HQayn1Qw&E#uf+y z|7><~H5@?}i*b1l>#{mJIXBeP(cRi8Nm4eKGvf|AA;v4iVp^ZcD8}bYYw2v-W)a;^ zM=F!qCrCj7DVk}oZ(EpZ3s4k}Y{fGfjwuhxBGmZ2?G1Iwf!=TLjb`%O^VrNlTy?zk zVn%%@Rt@vG2t=4!7iNoTr81&BaW0ApTS_^I0D+i+;jlthQ2sJv;k=&Cww6$EeJgh5 z_I*8*1%g1C(FGLc=F^E|{awv<0hiqtO(b`9~n7&IzUeKuKaoGfcR}GUxL~K6x0Xh`% zMR{(>^6F7uWv>J9=FqKEfv2Li&g4sZXlFAnVeN* z1xtb<&Urqz6`NgIZ>kNN$WhgA16i*pDT8J0;B-QC!D2bm)sA`Y`u(SCi_3uLs0r@{ z(dN?Xz47Tkd*e(~DDY%_>Xl4lVDceFI)^YzB|x6%G_>7CwfnM!N)k8ra6TGKV5`K= 
zs6m#E%`Lyn<+NKtHAjFFB8cL*_n&NSMa!&M8n6NphD28SI$IlR{LA63ukSu2*=;_j zigNwQ_)uSGOMTE`wWKmR6N%f9EvBk6WeY1|06LjoSk4U(+AX5XZZAY)7#sF;cT1zo z?pTd%%`C1!Nx~RW4(Y1)V0`A}Kp!u>*8Emv*biDxmLaPv$>j1Jhl|1lf`Y0d3`K@o zrx7CAtVg=qAVS~W9}O?98n3CEssnB;tlWP#_2cuWnri|R({m;nA%vQ$n-^zQjRhW~ zOd)5+Ze$YyPT)+Vg11Or4Rr{?+2ysZSlm>d2ttk0$5XRMy4q{~zQ)>`$>}*L?!b6w zQk|PyUc2$|3FHvMTv=+V44~9d7NW4Uy1ux+(cRkA-`TeL{T*brm>?0oEzMSoF!yvV z7Kvf2_$Qm+_B!j89AF3uCCB=Dyl&Uc(I?S(oD)S|RbjE72ErFW=ihG?M}iQ7C8@Qs zZk4$XL{U9Huh;2}r?ab@n}!8i4VUzf5xaXalFUO4^W>BQn@ zWHXikOrXP8MGJ=vtav#bd4A~{!+?3UyO=*O60piljXjG(UDq$2IL324GaRW-sG@7j z8=F_}KAM_eG;HNtXXC6GASlZl*#Gga+N^U7;sP|ao zrXA#QIYdERTn$I#Nx-W6_C2AdnWO6#t978a+2?WEZB~I7SmqW>77IW?;CR4L6{_cd zO~~^DwugP|!T99A{NNp*)47?5FS1e{vOS-Pbwm=~?Qrmdpwq&S&Yw^<9hBfeC@}P( z-D0IWZ43t1!du1nFg#m^gvy2`usOp(WuU%*Qbp73R*5qcS%#lxu?RvsQ;5XlfaCP? zAtH1ovIPNjSZy3ea+McAil)tPYye${Ox$gImZk9<5aXptba5?wsH3f;p>}O!6EL^? zPlCrFb-|hO#58oj=QFUDsg+~+9b1UoBhXobe+sY`=JBmUv#txeeLbw)=7U9P! z)s4O9He-o&HX91~Y*q^(+b~oN+nC!*B$aHQvr5dgaJ4P>gUgn+2+hcwRRJCn0%R1`rsB0v1c~ILtY1RtmsMWD79R z{i0DqNi>nx*qEh#rI0xv8o<4e!9o~7;5nbmRVb+aoh|jDU{OA^D-uAx4u`6$0gp?t zS`@<}zZCxEywyg~x_$ zu}D0^Ji;1}ITj8!NM*NLMV?QlGl_HtVyshYLf|B`xnwpM6y2{AB&7fm3N=kLOMz5P zM+_&7l?6dfH{M1R1!g)EQQ$pJr=qHZJsr*UwXn}|M^m%xTq_PnqX7EsB zfYpRjO*2e-RU?$mK+<)cLMjS|V?qeA2!hROA%w1kBL;u6w=!S_|1{F9k^ zPzvGw(_b&`03!%>Rc)%T>u+sY+1R}IY{F2_iV8GU)io2%&e$ha)zPb%*QWDHbiFR% z^Ew@mCT0w|DT(zVzuRGdFflFi0_HeXf5|v{X_@|!UfNns*C7FVahjIlB@#loasZg6 z0sA`J-aUOh;B`ae(^4$j6cv=9ol#6?yCY50Ke=`*Ioh14dL>V|6!x2K!v#EF;VJ^%oK07*naRAIMRL_t&Yoz0D?*oF$3 zQ(}m3Mb*>UoFMXL3JN^$aXJX4sZ5rFs=>r6iH#;@H|3W@%Ys{58pg-w5z$HgTaF( z^V#L~$dl>0nPoQm>yPeAUnaBc4jJpZz~I9bCAy z8I2`UHGcQA=|#23mfJ}WGtlE=4y&r*?&c;`gvw<;6JsJ-Q?vQJBnsPa&C*0>V-=er zM=0jX(<60_V2BWld;^`@tyW1CQ<+>gzv~duoNThr55Smzpa>KL!a zGZ-PK!%hj!7V>}r@|)j45a^03Yns4eCmWf&y&!V~6a~-tJbuLMbeMxu7@t$ZDsN<>7J4wsOY+)Y10$afY5*S zV^3dub8B7g?DA?c9nJg?NcT=(DTTrId9LC}o6-*t)?kp0m4bACS*#O}%f@q<=W%aa zQ%Uv1QX~Y(nwHJwMR9ilJY^y8OeUwQDtJLeR!TXHBtZa_J|T_;+7Fwhh# 
zJupRvHl#q&^h~aR5fV78D9ZC_cEFb1G$RN32J@z0kf!CM-Y(5arVThnkndy0;dCklk3;|-3vUeh&GO!fGgEc-!#TnV= zi|>bHRLwms4L9EP0m~9pQrNf&;t`+C$N%q-Khty_?#{Iu9!<_LbIl$Fmgyu-VZJ(W&^@MLI!4DOHv4d-WT{`1+%# z^K0QFJ?(V?zt`z#4EURZ!QtMnFK^xZ*6`(CC6>lv6!dxd^5iFzFgvLTAy8Cp!)*R} z`DLCHSyAPJY?CC(u#XRN_Kb4|(XG+3$=L;fxovtOxMO-vX)!aIRNE8)cjT`O^SzMC zPA#o;w={G$H{5zOrU5$8-eM8O>yMtA$lg012BT{a9?!0B9O-Va4f=czM`KOEpm|^S zXE*L#zds5}t;qYl_x|QM1svvBeF`m3_S%miEV93oAVQQ)F5LCR%!#J3e28`~grKZxGQ%}E zAk+r{m|5G{j3%2xfrfx@c5b1irLjKfizJfc^NWD{M{h-T#1M>|P3pFv49NeNPgma|eie zL`9%(HY{nfl@LUKbrRfZM;op1fxVK(<-UYOHx8*=qKLv_*39U(jDOGV!f{{ ztsA#>;wqqXB0kxi4+vkk0-zg(SLN^4K2DMi!DiYzMm^jyiggGpen>UF6!agsej|Dx zZ~BW?`2f;=BW2gXaM1qtbhL!RNcb|v%dy{Ba8&Z~mSCqw?flJ8F?az4E|Cge7&%gb zVP~R?E;uLo*xQ|W#XvzDTgwWCrf=1Wjrod1kfo+;Q(_c%1XR^NcXQGrQuC4~BuukV z&3;yx<1Scug!X!YDJmpRf=YslN?3Ismx~TndPuS^d}tfT5jA?=?nE&U$RvbD-qMwN z^fn_ei8eD}*d{23=KUon+e!tDl&Zg6A%`~`rh)gJKM$R#q~)8ygS{?BEt+Y_G!ro* z`7+ftXS_@tyrF>bb*kj4dg;eC`H5sT35TU{8p+DRDxNmu3quq5z#$7h|VtZ zEyDkzrhdW!Ss6;NIR#G7N_p*%sb3Ea7EfLu$+I{vx-C4cOO%ePX?#V4glE2s!+PKQw#6v42>A2Y+i z5G$}DX+0T4DYe(IHgN!E-H)_*im#;!J5g?TsbG@~XmEYec7QJR48>Pe9@9W$X={$; z{1hGPkEV*HpAPUxzDnL|z;t}b*P?p2tUIfIms7Taq3-Oz$cJ+8P>Zx9Keh#jX~FRr zq5@QgK=4;qbs7@yCm7ByOl2JIq_KHzs?y1pQaGP2^AD^FZ_koFhR(JspU0TH^Ni<# zO0tUy1%Hi6g)76triME9RrMy)aa5~_xvw^&prqQgKldOY@!3r9{ONRT}5!2q~R-GLP1*Q1}JoKHnlI< z-{bLsg*Zn<=&uT1MJ-B=gT#}dj%$MuPpXGf*4<7_;Z}tp)wWw*j@_0RrkP z8qg87dBE%%g83y6CcR{WT+dU{yf$fR88u`l;bV;Ou&x^4f>BP$pRVhO=F+|QsKbkE6 zS53yuK108w4nUtvj4Su&f^n%WQ`OErHpbNDo_Dv^7w2Dm0P!99U-uufmSV(B+Ef}W z-*}4g^K$!6)TYg}UE1)6OuG|_65o@YUUndj7*|pq1^wpytdS+nHjQ!bvCK5v>|5B| zd0kEZLeBBHe%56A`p=$-PfknovFXK?+`RFh4h0R!T1^G(FI9o9F zx65|@ikfQs`pZMxX^vpTqeswN{$|?KUmYr-6N%gttKxeO!k=WWot$K*_=@Z!9GBmh zG3eh6`?jw-2st5)GDAKig#RvSc6_|YJF{DXBQyA8Nk_%ui$=2>`30Un&(~FlJky&d z8LS9-UFTedaKQ(^h%c52)pRx-S&&Zs%o1AF^5gHK+pe^vBA%(gn!s`I3KGA-{dHJN zU>90n^5qZ)wPDmxDvNQXE<}F#C<8{e@ON}qcuzhPMfVk$y}<{+?K}(S8g?2dhQ*P- zi`Ln_HxA|2Uul`3Qy{dA;AXxS+EA=koVVu(WoQE{2yjMl;Coyy6OhUm-H0Mv9 
z>>%Q~?L*@pjBK*Xo^$c~U4*=GWQ6w?oLFIfB)@p#0UXAZlfcv|E)=Ynzbb4q^lEON zIZ8j7I$e5`V|tiec3Y}SKMVM*W!7x^?lox(W)hl|&p!;fqy{~RZr{$L*J-~9_IBh< z;;E@KO7f$AGWk@LHY^YfKG4(vKz}WXrL3pW5S53c^jv>5m_)Dbz=xp(bLzLs=~|j( z`rIwZ7IaTK$?Dosq+2CXJ=Vy1esjE*0>)+tcSaT=SY2+`<%x74XnN4~^5}&^U47GL z#@%F@$nL3l+lN-W#A1?d|n}S^}c#ql?#>_T=8Dn-k3g)GhU5h#KrTxZ5ml0UB14t6U|r8pkv4iT(NJdc zZ;FWj+x}FEdnYHz0`#};=tpO-BcHL=^AcjI%Zhvt*;UBaE?^Y@$vv0G`fd_*(}01T zR8Ro`&FWeHsu6neiG*QtfqlQ!ZFnMk`s~thp~sxTMlS0~YK8cfmpsbr!^(Mbe{L@N zVpI=>&$sJQPW%)xdEhLNrooFjeMG%RcjycYgO(&aVRMdEPgkv>tI~Diu+xCI?S+m= zMDE6kyM-!kv9$PhYxvS8@(-g;Urvs3+BA;AlRWhI96gNoCq<0rr7yCd=JMY%V zP$i&)s+%$w>QsvTj`0T!rg?*Rd!9B^!P-8ebb(z9`7X;iNLpl4hn)04%k)pEI-SYOO0WaTCb368XKmR8NI&MVz1`Zxh!xP9|D|evx zvsk`z-vnCt8g_3b7iQXpwo&nc&~(0uCKZ!t6n`ph_T)ENe~0wZc?+VSSSC&dR`fXY zcEJktm0sHV)DE3!oP+hDRPXI^XDFh)wdth=ykemw7-2Vn7ap|yet_*k?;F%1eSzWt zFlt)arF56Y zEhrx_CCU~oKEmhcof3vc>?cFuR+>ygoO&_oQR1jMGPg27mBvZj!1I{m`eMmRkyUvYh-@e=u|j%U}p&-H|h9 z24%4n|I6E4r#6pf$51(K4)#(z&D#PKUq{y&`*(UC^O%YW^0RqWT=K$upUIaB)|zreP35;0nmzuUNQkAcgbX7qLiE$5thf4lh9zb|8EEj+uZ zwWI^=S6)>bCmWX7qw=@~nyi2n|aXwoX8o0EyAPM_E}<)%CP} zmjlE#og^(U=-$_n{m|@7qcfqg|BW>|y!x-cXDYQBO<|5rJd<6|Pyri%kBPGZEvmVx zzT8H~)t~MV!o*r*R0;nj&-|KMA?}tNFk80Pk$m{Mb|ha=vF$ZB>(IF#Nl~%AiF%1n z8v!Dzb#$nfZt>C44S(u5z{BN6WFIccl~O}H?67LI85WhPH>*_ssjYo+V)gmZs{7VV zh$#MNpHC>}m<@>PTgs>61|vjJ*{Ezy9-`1i-CAT-;u+jL!>@rBnEHYVliA$VXl&58 z(SDOulBnMr9NO5IcTLT)B3biD?DD({?_fyI?M8_7kbqgb5L0S=R@K+a)r|!cF`*G4 zj*!wQ2bC2uba(cssmkWPa_TWFp*-id?ri&uq5t*%>yw9Xt^uu?os40;uGZS0J!Ak% zc_iYLHj2_jr5Qjxz;MG+$gO^2Ob&xFTpID*;j>D>KfI7UUfHge`l|H(0DfiJGiLh6ZSpF_MDiciS?IlB_2HG21%e(B>m z9Ou#dcC}(9iN^GPjk<#d@p4Ruk0_>jVF}IMGqB+XO$@UU-9Y@HoPkWOK^v!f`Al2> zXUbaSeYVFLk5JosW4aeA^p zG+!l_FthGz7?82T`r~Sz*Pdb{ZxCP>I>W0qCeC~8ne)jD^kX)LbT+N31L169o~5mm$Pg=8>d}4nz7aT zj2o6gZfSW9H_w7QFR#?@Xs4;_C4z7&Q3Uq~78q~wx-YTnh=ox_C90q(MbC?mu##*| z4MGt(SlHq1ILNH5S}d03*Ba(oGi)!{k1z8BF4E#qBh8Vxk=-ymbVQ-iQIX!-!ulNd 
zfV19DcA>y-y9dEHC{tnZW6u5yGRQG7IJ0vAtfTI(b`6PBU%p9{z)K=3b2CN#PktC1=(3Kqd#d13cFsLiOda5t5p%Jy7k7X_84E;?NS%w z+*l0pE_owthG|RqJ5#YLDv!VTq9nAt^jw8& zc=NpKq20P_$(<}+_O?={5RHo4cX*-7qvM6ijl>FQl>Wj736*4cIg`vECHo5=XzALY z@)sFPUbb`8uVs+v;a0l~qIdYwv1V75M_j$UO|jvE-uHC(q+!){wZprI@S|r)8rI(Y z3xDhor&7TVuy}@yeSnqK9Q>%h6z(ICnMv5JXE2UeRq$|7zMcO%r6(r!sqa&`(AfBf zJ+tF4{2!n8lP?@c3Y<4>kQ^QSeV{cZM|cq!p3U4R4K7BF8n;7mT5QRa`u=PcM`mrM zC)uvF8y;CTRviUe?*sBWcf03ChD>r0;36&3mMqgX?F();%Xi&x$rUuXw5_eGdbv0N z7nEG(cKJ|ow1Ye6PT7UnP7w86^!X2uP&-Y9(>%*C$d?!dmDhvwvML7g>@&w)(cR`X zcZE3!JbHf6mhZtsQtmHN^G~guYM6xx(=-Q50|u?nmgy`_tz6{qZ~aK%FfYBg1chhU zW@WVft*NhqYa~^z73Q8-Gdp}ci8Nvqu+Dz`akWbS{RI>GCpMcDKIeMi_YXo5{CCp3 z7fj$dOcms@?s|G{jVGO~sd!W*L~HmJ3Pi_v)MoC5(FUJDKON>jsl>6UU+ZlIP;kC# zM2o2^5L(#=fKQ`Jo`5sPO-Pjm2V$S=2s%P{jTZ;Ndy$0%82B6s%-noWdpu&)YwtpY zO$3+Vr2&44)u7hqd=k2`00eA~iFiCcn)z*mce85jMSBtF4@jXg+mj1%GN$$l{(Zfl zloVL%oIeWs7MGQvc{CY__!iXnX!I@Tc?%3ux@|8T_pW`CD*-rdEPtqD6Z@IvPmR=m z&kZoy=pNugvJiK@gl?hVX)y;>zx$^v5(B)gj)LQ8o75sosQ$$-zA@LI!Ea>xw3S)9 z0@Og9m$*AQch&+*!=Q!t+so)?{6SaMDLorwZ-sMN+CY*T&72mYP~eTS{a>)V0F*=% zskEX&r%;Ek1WP9qjdR=rvb9dQ;LfqW<$kpuIc_tft}s0=6d?}#_J}RLCF%=WRusrE z4_-U^(!rV4i$-P>B6V<$F-)tjPkd@a$S)8{HjfOSMWrv|-c-!;lNskcUg;#Wr4#LE z+W2m+YNE9EyrIS46#T;YVV`e%x$=`X4No1^!EX|{S!|>$(Hq`R@|X|2N<{uA6ITSe z5u9COI=XnskKL$(bvk@mAB~(%Ig_WG7sIEBi2)>zNb4NfS#}?h8>9rr)#Po_LtjGx zE5iGU=%=|W*j`u(x|#*yg#m#eK=tt(i>cU;dLHSDA267Z-1)4BBY+O;#f87?+oyy8 zE~VxW6Nj=2>yspAn8Tw%=&R9}akJ)-(}p#XE~k#db`jRfNDa^WiE7>D<%xra>f3x* z@rYPjPkx|OzmcuKrC^UF*9Agt#7*_(^vjCN^Gge%ldiOzg1i(+0{}$M!W_*=Vx_1? 
zf&n{fgxa9Z>ojuYY~D=!%fouPDb#*4{7#h35aj7LAPTOhD-*FR|Ftb&Ji8_Ag~~4A zT6QrpYTPL~Yp%}v2Ww;@3@%*}qu193U){Ei>|Y+qQn%c{$DBnPZ9r3!=FUTZGZd%` zeMAZ)@@(jpZioab36&%0fqwVwkh@Dk?elE;_Np=_Wzjy04UO%mA-NF@LHjf(uB5R& zkBxZFJ$_uii9PT7yx0*v?VO9l;CG!6^8G+HS?W;Ki;J0gJFcXP%B#1V*_D!2f(Xfq zS_}ZhIOmW~$y(_}x-<~m!&(*L8s;ahI=DjlPONu{FW;f`4rOx?`9%s(6;wN%=|BHY2!kyp#hgEV> z_Xt|%h?NqC^=&}OECY|s1)h?TA4v>#ji}!kWIc@X9c?^mGg1f(V@H*Y(3Ucx&-28e zHA?Y)C)b+1&{p7->MD)VF6_XZJYzNl_JFPwH{Lb}hg79~ow)_%5nWLl$@LLnaz0Sm1u6LQ5~ z_-(HL75Co=yjpx5&Lm^!o&S~CT&U5m_C<5*B`x#Ea&Q<%sRd=d#9#31Rl6!lgnqJl z#R(Lu5BTTEq)c5Qx>@xbWZ6w>u9t8UUzkRD8ar?mey?Soz>(bV$O2mh3K5?VB=?`s z5PzRK-g_9+_>tP#UA3MNMZ|w?2diX3MgjKU#j^SV5~Ugq;a4j9_Xr8C-as$v3Qm(< z5*LMNgzN)Z3<6os$b>luDdMItk3XQiDQ`J2d_7e7;DphIF0HD6`PyimFgK$?T2LI> zR#VoT|4(U4h!JT3kM|;On#t(fUpL~Yo#pw{=DfG;M#{i|qxC@9S9SuW9yrBJ=RJPl zpx3ep=H~?B!OORSAlMnVXqA77zr&Rk_;BBL?!+OLh?MH;Jzd_ShqpIPTo1w*RgObs zUz%ZSw@&mnBK>?I=QR?o#)ps>11^G4Ovi^PFHQ09eWvaSN$4y8yy8bv=Tb4Jl~}7^ zuAX(%m52f_ZZ5ZLTD&NVu{LO;SvtFU4!2^5M7!OmnNrMMFfAcrqI#W=b8?n~nI|^T z^d*Y=SPEUQDX`0juAE-z3&S#hWVkY%r1A6!(%4hjOUEQOCH0Eh;>_gr+p9y`3F}1_4yXPo-lwx9^5`Q%8M%raUA5;OQ>> zZ*sYHgC=uBL_ZOTl?X>1ABku>e?yD)1LJl03Y>=yH4RPT561P}U$1osDFr{8s}G-~ z%$EMKCm+BjoP0+qRU|%+qEcD$6Qk@{TmDP-;Sd=5ZBLeoLF|9?IXOREZcnuU9Tdc6 zNt6*|z{1WNcvCV(woQyXPTJFZYiAOkOqI>g;5kBL(^Zqy-$R-$%s;;V0V{R`EUduxEtq-yx`5zVYcKgx0=Bi0=dc%I)xOwJ8pBcO$>I^NGdh`aqU3J1`C6MD zeutFC-?|N}T4!Tr4ehk7*u3ij>>qd26(Q@bz3{**UmA}qVDKn z>>gfizVxB;IeSfRRU!_&KfO|Y*IiL#^4s47dRI95`45HQiu^%LmS^DA)X?CDy}Ov9 z7vmJ9jpy)Qz)n@pR`XnBy@zQ{{kL(9q?ZuCq3-T8Y2a&eb2W4DF7QrEh_yk@{9N+! zbtDmwGCfX4%{*OIJ>_-{1H8P22}UsCh0m4yTwQOpeNDT^jkkFA8MZQoA8pJ|IwO=e z1-wrI@4F(#U1di&`P0qQ+05t)lu+Xjep=tm2(y_)b zYxE%L@e&_Adjwdgh|$oX#FGKIDQ<$lcw*cF{d~$wRn6uuDz_quk)cBrw-$Bkefw~? z-G+IIVA5D6_^xw^`1e4Mqlv56b29;Px_Vuz*1^xn52d(x_a6+jb>^LJ3?xoD@+_Jw zo9Pc-rggNkj}*vg$tUAo#(yP^Rc9X%8cE%bN2?xz=w9tfYXVKGHE_b6yF1qsZE{A? 
zNcNwi+6_Ws&r`N93o)Y#TycOoIjhhVFB~J#Yg^6lFj2})rk}KC!;hy^<|BgluPF=B zr@JR$tMF{91^zXh%~rg)p_Y&F>%>|AS#Y3@rHUlS zsyLGW+J~GTE<}36FFow>h9kib`c~5^@OWC<>fAw0$)EV$zDUAR!h1d+wQZdU`CI+7 z1W?%SWxN@qn8>HNqbF{Q5B^FP7Lx^%v55?L4#F8$m9>KNGsm8SgI7gCPI1#G?t%^? z1cLJmG;03}vaa{Wb?-wAEHz!{3}2V)NfJyVfzj-?l~C$rGw))m;hwqX%=v!Hv|xCm z^cs=LPnKU%=EH6j)5WtH2{i_9faHM^x&g@QldObFt#FfCb(xz3SO=Wkd=c3iNf=qC z$f?-yX&~APDcZ@W;nWkZx-$=&tru^5agCJUg_+T~leA=U+QOCljO&GRa-LsjRO!w$ zMv;T-MTsm@x2N-A*-aagmiGpZodWTS%{<(hv*b-;_7P6c-mt4kXJHqSzFIbhD(?sF zbbrUIRQ2Q|#xLBDo7|sdhRLTwhMQJksrjvHy*P6(5M$vQCc{LYwIPU!gX}_;^ANb~ zdqKk=&(uFB8986j^r0fJ`2%85-iIu^JNf$ww~MpWo3qW+%@wF?u94_aM)u12=Gp2* z^h!>K2InMA78aFQB22!zSYcC*Y&9pvI+RI>IA|!8 z3V1!p&2%iN(qu~HuH-^zWrVJ_-Y!=#5{S?3tT#JqV%U?WEVkaRPm9;?17DuTKRQ-u zahZR^tSKAY-dMw_(Cb{g9}2V8qHn(yw4Uqf@GE2|(plQ{@tcmn?-gn7_&TOd^gGtR+Ns8NokNARfswBfoEE8ND0uz^xA%Zh7v#bx`6kc13J_G8w)s7QyYuC$yf-=1e>#nSE!&ZkNL znSGBF@?ND>dL=$E(Doodbw7%1!^alu9r1=7%LF4`O&j_PevTsDXf0&NIk&Q6&>8lk zx|`ew)YzN3$?))$F%TIJ^UQ;;(J*#k7;e5mhmxvlNmj8=jF0}%B}~jbdFx&Q+D9}4 z)_lv_jgEj`C8obHO9YCqYmE0#@#ib;g_h7IOzXf3EYFvKYvc)Mk7=6p5HGVR;N5>2OH-u&22c0>m_sW zGVyTmL(djAvoN0FGaOm34t4&5mvEk)hxrnyXCujaml$nN>hqRZ5w!TPI9d<@o3zlg zliN-zwDVZJzeD-&RdJUfo7dBr zgNncHoN3aNhkqMqZ?n@DcHNf}O5LP9Ngm&iG-c>Z8!8x$P24}U(F$UpIzXALR})Cj z%}W1~BM|6qp!MxapS;n*M@Q^4@M9M<4^8%N@s0|qKIJ@Dx;dp82O zhpdjZc6t|!tD9oNT>Z8mqX!2?j=!lgWJ^_6Z0|{jS&9NYt``*aiGRhI@p$fzO-}c8 zQp}C=r3Tq?yExgs^aegW3QA*eEiKt!AZdM2GE)hBn@Puz9wrzdAJ6!N_YDV$rgX{> zD5^Sn+AX#iQWhmqOK-M>00OtnxrA8kj# z>w&$3y5TASuZJwa`vJ;|+};-p2HdUX6$@vF>s9Q`Q7x`i7F^UAW?TNBlVD3MXKfEF zXEq>h#}itJT}!;WfrHE)9ujIa*wFC!QjU^dvvpz$Q8E3KMVBc9IG$Q!Tg&3vtuGei z-fsgoQQU?V+WKEZk&yJe(;IL7Qda%kpYih(kG-<8O1Tj!vo&cYE#V*3$wStZG7!W~ z4GlY-9j(k-bnMD`mrq>af!JRSke(YcMS!#vKA$pl-K;5*80jHig<(mFp+Q{J306Oe4GA-k+4WX@xE!dgg^Zh~s} z(PejP=WU?cw_few%oKreAei|?kSW<5Wyn?2(cgRDhx)ahfLwwjJ&GzhY)FIHl{{1{ z%tWefwpNEG)6~Nd#3&5-=(P96{<*^ZH|oCWVrr5^KP`u z!}Q1H}vwt7yrMJnb8%L^#1qLk< zn($cGFuyE6?&M-p9rpL`qNa4rLObQM%tajZ<4l0Nf}Ig-{u42IEJZ7msuDcWZfY=} 
z2|LF7(Ha0slXy~PD4KJ~O&l{41M2D2A8!!Lz>c2kTT)Tz^~*lte0A-VugRo}ER?x6 zspM$#)|ll%jmemqBTPdz07hM#54XKn&q32w{|{SrxmU>es7L;&;?&;VLkfo2=DKn% z!DqLwb0F zL{pV7WpX7(N4K@qEox-l#0Jwhf>A(L_{`$^&Bg1fzU)!pGqm*Gcgp!y{V1JD_{=L-D&Q&`4%aAUu z4=MWVWNmC=w$+a}EY7T$5m&?~G=A2PMt#JcwL<=>eO?Ybz8V7JFsahgmsb31$)p@wxn zzx5^*??2L_NS`DJ&uBFNG@M-L@C0e)SO z>F9{EF_t5J{C2KYaUT99@cFg0T;5_Ef{{L_!_vBviYlqSnu#q5^%Q;WZ^@}q>g~R{ zYLAg)qP8@jlH(3$)3(u5k4q!qwK)q&JO8`l&Q_k;KseobUQ6-4@K(FvRrRSHfZT$>~Be7l5xCg^##$QtlQKl@78 zO6B1YG3KX;Xz&Evd-~`X<=s{?QK`UP{M=;qGK66e_}Eud%VO}nQEJF+ua?^yscZI` z|9)HYK)%Q$eWIO(Gp5+X1?O4k_^@lue)~bYT#G@JIS`)Bl10bm7wb>mKa>~Bv>PeH zzJJc-GNu{gOqtA%NBCV)DhN0riH`EPBo5Y+rknSv)HQ1Cmt99z?s3ye+Z?!Q<9hCV zx-Nr=3lVP#q`jKJ59z6qmGyR$8s(U$)<35RrD+g5Dd^`~qL<2%UEXN0QB~G;P;P22KG-YDxaSPp&NMEN7mE0nbieO;5pl zi0@6+7`p2BelqVLBBo(q#BhvO-t6^!Yso9%M0xScQ+YtKS4?Hd^m^S0)cR?xixQ&kg3r(o)f^)W~8&De#_Vz-E$;tOT)xQbH6u*YP( zd3_<%=_to}5_JbN!`zUG=V1uHYm~v{mff8h>CDJJ0|tjN_qjpuIiTq1KuwF9kceDk z&M7x4+UL9=U>2nxHYqgU{E#ZvDKVaU#V;5{+72z)_4_QGE4cp4@^Dl${%mD7c^ohM zQEHL*cbN)xy9~brd;7Q>!458o_0L>iE%b^o;RM?_u`^W84qRmy(ffZ)(}p4&0*9$$MWkw-VFsnN-0dOwTMW1R*gjFnQ~}G3Yrx?hD&@Zm-$!et7gu$0ax?X%Eo})=ImdqlhT0VQxL_DD!bB@4-vc zi5xS6WpXu(9*g~ln1-1){r0eTLfE-_4R3nX9!6$|bax;4K#`+Z2oGyJKF?&M_ZOa! 
zMJRortLNph%jQ|ykg{|~pAN?l=+_>E|J{@CYIRh`)$`h`v5lv#g%Pb~a&;2IVj>>$ z7tsXHlN1^G$kjcyi4*HA-7b6K@?OH3x=Vj4E)+B`y>#UUUH6awMr$$$qP}`39rB!wb=c5bm(}ajQTB?c*WYvxl ztM7h}<||;(SQtS=G_>VC!oEmI+|M&#*rru}Dg#krF~UWmZlRcA2oB%dZ#X%P zv>=EVCMT5{XDYbo$LSN}T9)>7a)=Q3D^A|gik$(W5XDV}_2jxKN1bwE4B2}Hv+}ZL zXVSs^J0c04fo>*e)5(EA=)$)xT$TayKxtKR9!;vI)i{YX=N&}?*2mmFRBgAUb@zm= z4BDaOEH8H6$?u&l-5yT4BgGN^J!&FvS1XV(P?Au4J(LR;{=s_Cd|-AH;d_$SEW^Te z66;T!HhXf~`pv)ds45!T=m;=5=y%xn_1y;=v6v};o%+({%^jo*y8Xq$j_Uy}et&{T zeuJ+LGX*%<`9CZC>TzSvkrgfHYV2G6?H!)ez3SWQD5Ie@RQB@fwaq!N-75Km{)S3+ zRb=bWWeu&6E7k&i0Yqy^Fb55>XM=ao{X{G`5Li@y{xcJR7&-0joINj{3;R*f+oeg6 zg$>VTyj^!D;Dh%u!C_bK)T(UMRGG5m@M=N)%dRmc{?4ysLs_0h_@wNuh@n5)U$CoA zn-@RIXg23fb0*86#w!_|cKdb)ULy6Zc#jD^;v4;&N!dxa*|(|=o*28_SvTr4Rc%BU zQ>c?SdO&4W2vz3hKE3ClQOT(KMrY)n$_)y({762BT@kH9hpSrdHW{nX=NdWDO{;3e zG`%IEo|9bgw~s)I?ZD*-Hdj+sJyyBLs;EJm(y*`M&J7ixF(K0p^J_TIlB=)JAQco-2{Mvtqut>9iDqHny_6ONh zQ?9JpXLmhnug9Zz23$rC+94`Q=-Qq^e2?H9?Y_6|XJcyBX-=W8pwhV5f2-}T2rQ-8 z!>PS;pBK$oOdiw!$*86#jUF9klqF;CJfA~t5?7dH4LqiA^iZqNRZ`IopU+V)Z-Vv( z>lU3jX2)a8%evfrSAiytW=4pwsQ8onJC6<`LN zao~?!pWq0ltf~ESXBs|+>oN4Z*#{Tys;RDU3a0lr9!q|x0ZgX0@9U*FJmC6BBWP1G zyqIDBa~(~@P?ZeS{1nPyeum(;|tw_+B6S@`@MWmeS-$XUFA4BVC>t zOL57hB#!&xoOgZ(?`^`(R-LP;t z$U~C>2x;m0H|;WfbrpPpa=K!XST-yf%<`wS$$k~h4{w(Lz1F`lSRA8&A3d=+;A+zbE9AUb5G>R=l-kYd~eKoeq%m3B~Kvxsfxnum^i8cgF}zg=lwNMl>e zAGs#qFR;%w_!->$wA!yMZ(?ec`!qV{Il2)g6Ko^3+}Yx9J9#Y)-zsb?yNYXlOL3zS z6v6KHsdW^e$KCdo$K#; zq;f%AP%%zO9)i+8hi(Hfeo~4ZHcC zDphqoqO+fF&O;VE)}NsGWp6-)FKgjtbcS74thDmPry9H5%{pplbtjBxx4MG5 zhjxKE*v)HHB7rePzr)AfeSDnB-JY~e_8Q{R-!_jO;=ByDbwKnLW}y%jSYvi6ri#W= zC=q?V3o)BMPa#{R6edcQW{!9twQ4d~f`E~;nBt!B8si%g%R2?O2aChCEqg7%)~{ZhwX zSwvjwT}OH%e99aBi;bj=<@2}=*xWUfYu^7o883FNZn#)@4f;tuWKRSHOm&M%SH)0r z@=6W#wjoTgOn#)ZLuRB=QAH@GgZ6sU>3&CvF{lD9q{XL?zd_ztn&ngvFoVhu$8o*uc_EkDiN(b{tpRIH|Smxkgi!uS~! 
z2^#NY(=Gp|-q6Rch1kzD{+eEZ8QX47E;qcT5~A)t>frjDAv<47{c^{q6q3x$J8Zti zR*>@Nj7001CgC+ylJAq+GTFfy!SB^4jk+3U~4&}^&(GYlfbZ&JVYHY3;xs2 zEG`?Rvdb(l$)r`Us~fNNs}=v6C_}iTh{jG%UAvpJ#h)UrxxTWnw7Wad?&Ybw#}?zm z>%FLNYdn^k9OXVh(!F!sm{&3M^mIB*&&`P+-nq-eq9c&INl-Kbu@!@4W1S5EwOlAP zGQEQp+W-k<#g*}NIiN0?pQV$fE-4nW4W6z;|KBr@sq=wv+f#{Rr*VtFyInq*d+PY| z=Y%vt!q4G8eCke~<=)AwDv_0ocfqr@m7v7k(ugmYRjrb=%Brn9C%)z`Xld7@{W=UJ zl#vrT3|S%iCYS#wUWx$uAq&)Rm(Rz8ol&ZQ6x^|@qoy83J$L1H=lo-Lb$~rqnQtB+ zIsAJ-*sL25F9&~iByBy9^!Nax^Wfz(lo`$4Js)O=E=@Lm^0!L{)69VvBFYruF^IgD zH)C>YLSjQiN!n6ZoKKBwBL8aCis>7_Dwx75dUkV_36mf@UaMqfU3qD7Iq$@VD(4ue zaDTDRX8=J(T;E{><^1*e4#B^selGdR0*cI0wG|Er?yYK|`Z`i1=u`t^P84M!l%9I5{S8cKl5kI1T@avcg?L+(+ z!aPhdnlkz@Fs|aW6yzlBYUpT54+>to6ymHd&c#rlP(`twU?+1$1#fS(165K)gk@VZYDv|vmzvY1D zIKqp8zWS#y{11$oQcT&H7O~jk%~^ycQn_xu4cRz{_d`w|AuagkY%6Dl^0G)$byEPSZgrR{d9D zySe>V*Y;l0yW8y`-K8YVM-z4|r2v=n>+axu*=c%mjUFer`#a71i`TuEAwqoUi!|*! zk=&(X!n99vMIujk1JwvnMK$JOEv+@dVj$el2*ATK56(uK0XoXc82R*(z4KZhP0B6n#F_G1d?N` znk99G1+`)AdLwiNMSqvOox{D@f^vCTI^);kY}l6v5szlMzxxdIwcaW<@$;9+vW1M{f53n zYYn~ZOoL1Nz-X>TYx0KyLtIU;)1~3t=+{B6x4BvougB&7cWbR}ZtB!FG`T1K zd=JlamT0=^H3HI8;v!qVv<9nx7Y>{|aZE@zn&ju2SZ+3-2V23G@PFE?c@1Ul5e|f;6?T1- z%*f1qd|Isz^uAtbH5B0E$Nrp^ncp!SEoEs7SsTId)vEzO4IA=QQrzzR^|dG;RYqRN zNxSM@=bl20QDYTi6{UVod~PxK9|3Nqx6^y6foWtno$w&%EbA>jxt5`VgG=Pr$txDK zE5FiTUk+l}eSc6Z@_2mkJpg1q#Be-oPun6vJ;s+t5Z;i;5fkP^q(0O zkS-Uc(ouqMnmew~7IQ%&2;|{C8F?{bDK+zO+nrkRH@VtU<-F{??8}xo*fGlPOmF$@ zUrsTH14_2|*y)L~=#KVEQ*iig9PHcM!yzQ9JVSPsftL8Hhgdi8`pG$WNtlP2kc2dr zMg=yvw7cDUykn~K-C$v1vX<({;w!VKC+Mr+j56JkwIk9a)Rc3kTqXr`jNkKURh}ZO zLO;(uyQtjirT>ptjWXhs@Wz>aEN8=?yjiYW@J06e`a;r#S?&5+%<#@+oV)IlXLn1W z<1+X;LaCR7QK0T7?cQgdDk1RUdK-=S3l1^s!7P z?G3P-K2Vs8dbqXio{^B%T=RX^-hMo*L}nrZ2({MA0?U(%5b zMXJY#pdo(uBaqCrpLI#<)VpRy>|Lk7N*_sbeli@yFi*zmPsVjrNF|SEnEB8UcHQGsreMw1@P+&2pMC!4!6wrV*G^0d;X$pPm!B zgivaa&;4t6zwz*AowMsq=kWTrEz#@e>1nblf{@^DU0!sIxR-0loD_q-^DCZXcSBF} 
z(dJ)7mk>_=w>zB+FDd~c4i>bz_g^Jv~~9;NwuG+}swk$nx%P>#)K6bweZ~3bVELJRQv1lLJt=TIadFVwYO}GYw5Ozfk3c?U;%<9 zxVw9T1cC*3cZbH^LKB?e?tuh%Y22O0-L-Ldeb@b*z3+2=z{}NNpu1SrwW?;#HEPT; zGiUfiFWgZSa|ec3O4FXV z1sr@wx%ADcNd!=PC_an>{P*(sG7Wyk5jJ z@O;5PPeyAH((C zuJ@X}v9ef!d1vvKS{kZ(1GpXiCuiA?>|42(fJp>(E~3DfYSB-**}0&*sYi?7Mc&J~H<{ZA1QzaG%Kook5XZNe@AZfE@#pQJ>eAAEpqQH1=kx9fA&XDJXQXl)Z`^3^8J$klrAXE?gB zQnitJ%O*7AJ4IK^ALT!5LJ1*u`#X9 zbMmQN^1d=ol=)`79^Z38`^J7Au*@KeGo@hty4w#eWs-9-o*ZBGzO$`V$R@3);3n(N zx2_KgTBB)f#TnGw866r;6q0#z688wzS)|IM}nv3h8F?c_J}x zZYuSd-H8<8_3bX*Z+kf>+WPHE!G4rb!;hnVhSyjWA!+0$*^8UhT5R63x%_FlR9$%N z-zk@3%7s?Z;;A7*bSs#->E-HD%YCv^V@xHz+^AMb^(vd`}BaYkbi=}IkEqU_W2CE&s=rKa8ft+%@>YNyw~kAoy~m4vu=BD%2z zk35&&$th?Z;5~?*t=*dNr}f3NTVhkD@{@!|%gUWBOifN`{Nyd4n*e_%R5imMFd>)V zj2y5~mHoyTvz7j^#9TxxKSNu{g&Kt^(O_>vto-mh*_nwMH^Q0P+S*g^J7-MIm1`WA z%M8y9lG6NRx9sq$28+*-jAkCKYOgu@x&dH_$;(F)qm|(gFXT}{5xAtp zS<+_gOfKukb!N~*?Kh)4{r`oQ0#&vA**Yp!wDvjjjp}tmOz5vhrVmc;qPeEQUJDCf z{}kL6H8X%WEBE40zswoQ7CeHA4i%u_o|NKrnfaMjUiJ5;u&B3QJoXmWHYoQkMY;S% z$~hJqiTO&Vb`OC?`@6SOKlQU-s%N7=&etwi48LpT=iw6)n^p~e=GXnA6Z9yoJFBc5 zV?s?q%wRMPg4sns>b>jXOc+Q_+_r#3m5wrQRZEEF7maRy)ft{g&I?q;Es)@fOi1Hm zqW(38n}9i764n`!Kg%hlnwf!LtI&vQf@cIsco^enDEQt%l%N63fdk& zbZcTZ!;)TicES1SOPiCFoNAyj%cc>1szWS>de#S6t*M69dzM7HB5rniwIUGu%|_?Z z#B$>hYpDq}gOr3!Z~BEhH$%{A({fmD_Wq%6GAp#IPD8v$+M3M{b)8yeQkttJNY-Ro zgqw|@C z8mrsmTpw~id67FD{WAV1EpjLIh#)ZdLumD0MNz46*o0E=|R(DDX~U~cDRdYXhitvv3$u@ zU_~@eARej`W#kEa-Pw8INJdj!7ssV##hJ*ZKN|a7J0ao8j7Sm3#@wwlt;n12S|nT3 zxe4;%oK>NA-Mc0co~7MAl%*p{PNvrdF{`MeA_^jeIy0X%zt%L{w?D*v*4@j-)S$Pz zSot(p)YP}_2$|BUQlKTA@a4BQU?B+ZVNzAqVQ#q(5f91jfl=8oBg>DXa`qAs6)HYPkn?s&pLc7fb2F3<*MhsU`;?VNDkXAd*)P zawm2~CKPq0pRBfSuSJ!{xZl_7`wFr0+p+e3kYwkt0cg#{mopQhV1vhzt_N+RgA1^c zw(=$evg_0OaL;s>_q;K;J|`no1zc2QemGe4gQA5x2eAS~__$daE5u-u_3aS(6kZ-qy-sBvy$V@~BeU$Jcy(+X4U3cbh{)@# zj~0x-ySy1=9!K!a<@F)s1Q@Q|Ty|Q33CA3OD5zyEUWG@u_T!-iDVZfN0(oC;;U67` zAwHdNZO%FkkV^v*w~LwfQ`+fTu&d+xZOCk6clw12w+!pZ7Mf_QBPJDSA*|kz5CJ{9 
zj6!ZZaHu$(WLwSQi!TBvWPA87Zb#~eR}&&!^N#du8Z9bf2!rCq{u^EyDFO6?ckJ z3E09cC#wlk`62>-R~g30`a%`<&h(y}P#+qONh^Na17pfeC@xw--_f|FckXb@ecBqIq&RFhhH?_xuI;~u-ph815+dcDajLmH3-2OckxazXw*p(XK zjG`Bb0>JB)Qr&GVy5xZO?#BnaWwi0(|y0RT&vm1g0V`vp6MTxVSNp5(>AuZ|CbS zuKMn^_qt>p2xxiPuhNi$K`Sd$rxTgB>7nyHuSFZ#e&rzrL7405>R;W zcTUTwmnn-rZ$u{fIdAkkGYa#)CtI*OS09R3HlfF6zzb)(S_fbvch0Z4rfTX~f!cWF zzir0fSBgusuFNm}wl1qj9IR~`D& zym7dFfLv!=hJLHd9<`uGM%N<18_PiGBpG#y!D3y*K9BbyHCePsi$hNXb9m?vue$T~ z2p21x1SUXk6Kd3>#oc~u=vV$A=v&{%>Oq~9gtaN}%izU}0@Y=Hg0{DqkgMwBugr}b zrM`B}?aC+7%mz#Tv&{TqiSHTOt9a?rUwaEDdD&1aRT}YOv?JHsTB3l@3)uL%x|qJI zRykh8R?}u(Io9D{=okn$DdB6Vr0q}4r_+^OjmW8UPBY@#szy{A@Luk>F0+%ImNb=@ zW`Wixu0(vUG?aPWceGCOSDK0zeIPfk>eiE$LW|E^(|K*TYt`w7hDwY65X8FU-(x^2 zm%xuMmZN!BqnEPr!aWPlC={1rkho&wqBf1unfLgo?&F!3^>K0jWd;0&dXLSNaP<$d z9&H4x)sR2=mCT7!EAG5&D%`n2@l5Rmp-5#=D%JXhuVEy}yQ%jq%$oi(ug!DmImUNK zem>FY^Zy6!{MluECa!c}CQxVe+rnz}erA17D#t?V2#ceu(X-7SR*(tU8W?-Ic{|;C z7WzgD+W7Ep&C~)Nl{Tm@HN`P9b(%K{D5@zfTv5ClA|ZD#qbQ3TPBD&PGz0DE*oCth zvh8#SC~JQbSBwo+dD(S0m)R^ESq5M^TFvDT#l`3+E6s|E4)!k!!f{kZ^k&G(lapF@ zD@JM&Rm0fUMd3#bOuig4kYs-2m}AI@d6IuutGduI5bV7%62w2f9#3Rm%K^kybT zjHB|_oiY___IBvft2u*dgk&T#wdh*-Fv-PDUHR2FD?}YLyCTYt*wpXyM!jHzh!MPH ztPT0qu~sG`AP0_Tz-3wWXb10Fl!$m7{?eWVWUSf`QjUT#J6TuV@A`qKU?1bntc<4a z{MdHJ=QAGs1;T^6QvI9#oEE>^ePU+w6ulIOpwiR~e!tqLJX;^@3;mbd4Eyp+47m{p zXc~YcE%Jc9SK98D`~Ip#B@cIF!|Yd{)?wREKo1g)9+n&(4g z`}8JXC;m(;jGuaZ4NbLdf}DIlo6lka=0|M^sNo#YTkr42bb7rI#;tIfa#Y#>99puR z%zJG5zI2TP{Cr(bxj$!#_xC?3@{E-^l+q;R*Nv3Q=9&?ki>?92Lih7Bz+WCn&c)PK zbOLuqMJ`9KV|HrfUxtUz9!^pQ(Moe?Jnf%f`GL--jlJrcRIVm69wI8=l9ZMc&mGL@ zCfO~wEQ$oFfCcgkDlj9OmhYbi_RjV~`ZiZqmg!^=D0Jv$BBa5EpOF8d1)O$P(BoS~ z$n~;{3S}HEXB_G9uVQQC8wS7+qK5BMg<-Pbhc3W zLqyP*60o0L9|o(?i*Y45$J_caDJg{_f6O}Z zJD)id6W}R%8M={YHzOmX&A~GVCLSA;T2(uF1j_UMzMQUd^fBQ2q#o7c^oY_Mh)tC+ zlG*66eYt($)EojnZMbw8J8?OXuna>W6{GPD}%1AOnr4z3^E)xR>;U zCCCC~lu0a)8(qrwBti{-L&1JmT|9EsL2KeWErVNV^~k})^pZx}>|y775Se-JX8?xN z7ihRi@bPM>p;Zc_p$N4g2lo>naKI3(wN8F?j2@H|FI2jW?!1+Bwe6Ea^l5E-xt;3M 
z&k!pc^a!1Ocj)nO+aX(*tdWft^K%?aE5FPmeXa?A;_Xo( zhln7^dA8t6fTz@4mNwo2`tePq@9l_NrFht&8(S2fEGou>&sRFNI%!5kWWmhr(PDa4 z!$m~cIg+mOinrxU+AbWdP64+_W7v~d@V+DEM5&N-oU4!{PYDn$h$H|R92Ar;>*7_L zT5BPYwK|FMEKhYIIqnVSH@(ZPM9iJI(N!(GT%kyAcH8K;M!sqFbl}+(Rkg@bXm)-$ zmR!}rtRBcWMT95o$@RGN^`pkN-nCes8@EbT8<=2z6+gbYl>g6j+Uc9Xv#2hHm2W5# zim9FJ_X_PNXS-oVw4Ca+1?LbcK{jDN(}U0MR}2SNr6WgjZ*N{pl7~`a=hgyB2Q9WH zR%Y%t#D>Huw{Fhm;eRIGkR{WLAW>ZIUZah`=f9K-qzrwR!0C!p<{0mWjtKeIH`zOT zQ{&aYcNVQHIe2sD6Jv6&w~EQU{kAMUajvQ?PZ)Q2>l}z(1~e5OA4n2YuhM1JmsnMG z7iU_Si7$DAAJOv9WbM4E7ev@InQ-YlV_duuH@uy08tZ}`jA{F0La3FEmB*2PP=sJB zVdU0%8Knvx@GqwB_Z67G4(>Hywg3YdfyU|zJC4j{WK@!8C*;`iXnu*Jq2a8eO@mc> zz!&VWw~%<10r)O|B$O+dNFXrYTe2e)t#@(b4u0hl@Jmu1sBmia z6OJ{ktFLvgwQFgG<_EP^6W)#TE&UEcyBbdR$iTrr5sBNNCPdrg~9sU8I ztPNKGr}ggs5UB@Ny2tp>I^W;jyirz{OCIFI7!g{!gEho@G{fn#UJUQGX*90p z?yUQ%8(ku3yz_Y8N^ksgqUlGO*`2?~Q6act#X1g0%jkesR=)bjspnRUxu%1K=D-?x zHF`bHL%)Mm(NIgaCew&tT6wUj_4xPOg@s|cg%NCq$=CIRxlkm41*CP&9M!9e5ng)T zQ`_fn%0&M-!E7ag;+iJws;aCi!S)u?RDjLD8quayIk%dY_|#HFd;XiT$^QC|*~a_* z+^jR|h*Jkw)#wx}=BCChZOmpEN6CSFAgBXeR{Zi@{t`wn-d`nemx8~%F+6@P(r5@@rq1uqb0Cwg zi?30q#|vb0wM_VI9fYWeTQXzk{a&HIRh-rnoZO3@=PIqhp&b z4BVb#^u>W@C^=4+2vA8J^253$fD-;24s9y$`P(m?OY>ba-`5Mya-=XlgIrmd>A|iH zgYBYRu`F7z>Lx<6gF?L5*T%&F?|C9XJ0hn>#S10Ln8sAp?9T)yPABaAu}}Z`B4P2S zh{OmSx2Dcsu3j58#Gv^jLzq#LJvx5Ch`WVw!KGARCds80tLu}AD`m4ySkbQ08-P|Ovab0?Za;9%Il?lomGMM5D0}o%~hx84DJO-vc zw;lg|h&4}|W@sN}WADPrh!6*k!D+Naq7`)M(8vW%H?;&zAkxdLh>he zQ55n!ZnIp0`6Y;NN5f%XMJO1A{y$^UOzV%J^NJa9<_ahzbwmDHEIYcbR5?vAHqVd> z3dj-{r-c(lrEk53($MVUqf>_ba@Fr<6HOL)|GRCq>>q5meexCVnuMV!t2M+jfRyHd z3rhYZpGTzIVq9%$5|8q5jj?%Hmb^c5BB zO@bBa{7bPrOFyPT0MgfCa^{o+FECsGzjv*qYLHzD4z5IwwfCx{y^8yNAZ9Fd`{GVC zf9fB+3r?`l|36eth9FFinBA)SuOYD>`N97lPuj3h((cSV8V{vY}C z^B<)DxsHFT;0GiE_3bZs70>?+H}J*`>G#j@y;V@ODgx}PGTM&h37YYL*=xYt?nZ)% z2oY>M>F8wMcHez?F;Cr@dSg?DO&FnkKPMXg`37)s?F)v$xm<6EcH$~bpw8UfTI@%e zWoj-8R1Vr_Zr0l9PL(l)CEY>t?@Wb*(?BoLgWrkZnVM~B^|QBDR_0F1i0Z8Z3wG8( zVWk&$I*kk&Bf{hV-V5ML?faT72(oOxrYArdc5k87<>3{PaSSaBE+(~IRy`f@Al^kX 
zChN;THxFoF9rPd8r?FkCZU)I)mvwRetL^-uNe--B%dkbJpnm*tvAj*G9L!g5{`)mb zNc!pBC`^>q|42^$q$&7gP99d~1ZC>Q6pdFZD$IWRKmR-Iy|zDFZ#rAsC_ct5P|bJR zjTl>0KD%v{j_=6lg}yxykN>};>`&8E#Hk8!g~mCjO&DXPTwH5T78~b}CLmPoJ&$9z zTSw1dWZMz`Pe;hqis7tNwWl4yMlwFP(_#vG9R+=7JKDf49X=wT5#h*IrEQ#l~5WW<4l@N!R}l0nkdx zpKT+8t(T4L$0kmSiq^PG6H^46D(Rc(WW0CC2zI)X#KUd>?O%JUL4xToM(r2VK%h1) zoNZ@fRCCS`DDlKkw?=c>OMU#8!3VTx^;0W>L)S#&)o0uZf87~~%Di*O>ttu$NuVrE zhWalL84j-Es|Ex~14UelI&EnCA~n}jm$@5>j-ipjSa+7S2hVuk8~#oE|Mq~;kPFX1 z+LA0OoKl$}Zm0KkQDLGfmgcRx#>{=9S(pWL5{KNY0Qi5F5YSG8sD%2p21)>J3lDGl z2yB6f@ZHJWv6`wHCm8d{wsg^RrWkFHh(yM`N?bAf4fcPpRDt)O%GBv2I)cU49N6v3 za#4}2MDCgFSJn8a-mmqGTzoipw}*GK%#tYZSNQ*Jrh%@oG}YQDgqlAI$Wdfq+&>z& zwdJ=mZ{2)|SSBL4-yf{i$?>Q2nP~ZEQ3EagRjWMFD}JmI8yXy(|JQGe2F;yE41Q`26sMA%5WH;>2UTf* z+QcqG+*pnlLC@-wa@cibUG5QVDaWtbS7@<4ZX0Zr&w~YY;`Fyd>j{P+q0J9O{M>yC z*v7Y77G8YOxuaOgHjH)xr(W<{wLTVtRqstWrj$dl^k&zcm!|r~e9s3*QAQX1xmRiE z;+d+71`%LHzWP5Vp~{IkuQs5}*yP_I$Ca>+i%E)lrM_I1rIt~}TQ~sw@h(P#izN|VA)y!huDF`KyEZQh}y(JmU zdGw9x0f0{q0s{&)ys0z2P12T(Qe}#7?!@4ui~geIgi6^^XMPG2WQ!!$Kov!amRW@u zW{4IFQ6ed+nz9qNiLl={x9XMCN`;q3v-K`jdGc!!iRPS1IXgS1(hgGC&(*%>ku6b> zQ$!();FvV(tO+TY5_~7hT1`+zp!Qx)mF+8@aft4ZrM`8A2f%Nk-^(w4C<|g1&$Qlo!Jg%trYBweoFylq|n=;!6@>>mG@-pjT3k1=ey z%5XW+B_lE?yKhszpn6<?VnW3u0bnX~~+?A7; z3m*M~uudd)rmj4QyngG72tP({&DYa_)+*ozu68JG&gRiq^;*8+=5GWB=GU!Gk6*{lM%f?Q z;2^#ae}md8HyL%Zd}m;Bn3UzmMH0zEO(lw=h@vMmj~Rsdx_VeL5=a`U;=Ml}n03UM zvWII%6Kjdi4Lzl(?4Jlf2O4yK_|NS+mLp>9>s!p+&Qi*nWjxPd_q!NdQNKPRObg3w z;f^2NrX#B~33Gz?9KQ&|?9a@~nv{GGWI=@HJpdggL?s^3;OV)O(Q#lLX>F+a)sbEe zZ1UPf`wx`k7_STBuyn=vA1%)!z2{3t7!Nc&WxAgjLjqEnTKPL}Zs zu!MHB)PAZoOLCw>j{4gzDFW%0$#pr~4DD&DiZj%(Zl}sbV=IxHlUxi;~ zD{y++uhzfVCP!Y81@5tPwI*a*P8(ffEhg3XRHunt3t)Q@lRubUH0orOM(Q|#CnnZP zsPt0gBzbimxW%@^YfC`2TLsgzHJ;^tf{Ns}5P%sd401@id#S$39F3vg^D!`MZEcq9 zcnFq!uoPI{Jn}4a{VB6A{!OKu?r+0)e9+DIM(eVOLkq}k^vI0#Mdt~(aCJu)4TXwBKu9f4tJ*w7R28e>_~ z5|Q5#X9q#4)hf!IVlQiNx-rTP=%VT@iucZ>=87AF|Cm)4z-QVQh7JUBHpNd}Abt8a zx-n0lxcz8s;cT>f#=B-Cu1jA= 
zu%~z;<6*8&*!;N(LRr_J=U$;K;54KuV~NV5urevQI(_jv?4059)FMwbWkIA4wj`uB zd#U|vsTH&^iM}pxK>tg^98MI4TjRWW*$Ez`ooAK71sq|vl3%l89b9wIXr0J<$YJ#* zK*lX~FvseCE!)fE!gY5%U#n?ievA-&J{_`l-Nb|s^vk#_;>NR#oV~8&j)o576u9FI z#p2%e7z2KuH~p7OEW-DK83X(uKFEY7AS-k9H420myVY>m8GnW>cQ*7bsd#4yAge;QB!p=<@$(OI{@?IP&b2@g8L1Vmt-9YvGMqMU($}vFgO$kW=8V7CFw5<5&ZIylcxm%j+%lM?B1cj0SKu3pz}@ zX%xVaD=S(~uQ`S3f`>>M&5&gvqbR3>KgX`sZ5+_elm|1tUG3&~d+X$a5Z6%`)-ilZ1~sdgSrt z#xm>LGV7TpTPV<=!?uvzS7tjJ=LwzWmHc(un7_TTJninmE$cmWx?!gDw-?=p#Kb$nHys%U=hO&zjUoE!K z84nEskrVfHph|_&a$YpYGdI0yDWBG`ZMFt5qViqfTS~?eC2oS#bTbj$vy=>mtDwfl zxNE7R#gpDY*W%9_wReN?Hi);gZ=PB7pQuw};gneQ4rbRy9V~@) zI;GnYK#VMfV`^r1Kh%_@O3mRtvtl4U{Zfj--_Om-?b~j+Wa}7^wTr_-jL&DZ(#nmF zo)#AqF1I2G4>e&{|D0PfZIo*jyQr$JzNv((i7iu<$$*PfSYt|{y{%$ZUZ|aTQIz`4 zlrl+4TN}qDl7*4x;;uB(Pt5U_Z0&+BEQCogONXM7%zcS5ER93f$(3zf2L~u0%B9^8 zUbnc;h`cHVgxTzx-R?a3<2J=()ZXs*{xH5;{=o0z@;@!QVuR?_D-$CE=7LJ3$2dqxuNTuKqz(*YU%uZ<#UEZn4 zY3s?DI)RUZ2?j9~%zyGv%4Z!un%={0DcCY%UwTM<8^#YG8c?a!SQ@9hmkU+;hMNbU zWvV$;_~xgAu`xhmW3pOMa%9qHJPzrzc3a_W0XU)rr(R11;92(h{OS18Ui(#O{9rET zr3Y%xp3VwwVSqw*4mGuOb52_I5Cg;;z6#Fwvjwk37EOVv#1D83a#0IL+)o5f!?=GB z%=oibjq=-r*Fl~eR~m^4Bl!4gvmus8HY13ic5fgibowgQ5W&xXYYWMCFBH%KbBar& z&H$=72{!E0ZJDB?gHk5j*iW)7j4(8$FSr62HQ|8`6|p+Ha;o-LM#*rUsTRNp(Z5!!jdXzZBABUBx5H<5LT|{?`ODboPsxZ;x`m}wZLr=7D^njc3 zaWIsEg;YReqDEiu*3rY|s1VbR1d}Ce^r77MvW}?*-atzO3oPSytluo8;*-Uf`Jw## z-Q>sSP`ZG4N5Mb)p2~vHG^>6Yn#V`x!mXYM3(LsgxilolqHlk^914^(mlcFjT{*bX zjDkXIFGof052f5-g&o3~u!;GlCj9a@(Q>LXr18IuUoD%zdwc6c8cmKPi>_s9<$KpM zG@I-@#8KS#{tOwAJ}qY{rItx`Z8(7I%jvedhkf+({r%_S(txE8WIexBF5GtR?wtn` z{Yx8Gp20h@I=|u4Wcp>wnL?X!g(T(nvNwZCqa2DU`sGESybTWiKKw=`bY`~I%(5cPh%W7e*lc+feP|KDlYYXFAT2)6{N9(MnCQg`20P2-knUS~TPfco2RI9DH!s}F0TsVFJ z3Mc>0rueq{w-wLFNN?|>>lsV7(vFo^FpPIoL9b#htVyWZrgIaAO9#TN=ZRQ6me6{W#nKJe720p0X)OGyh|vml3XeB!j{>D=oXLC+e9L3TF107kSC0L# zs|KCa92&LBwia*OSTbY2fT$oPMNDWGV34sweC}?diS7P{CnC0>^f$*xomSzd3Z0ll zOwoqlnIfL2kd?9`M*YCu5_t()JoPTx{cg?^%Z{}y}cwg@fK|p;1-Km 
z(edIei|;W|>PAEa{fx4S$T*uSlBM$D+ots^dlWZId^rj4)K~qK>uugIurqzb6oL32 zEc`GG0qQiQ=mQE_G6g-GVV}oNb}-AW79#tM^OURO;cDiQ4=)Ar!k_nURu$foz*`tH zqIJ^D+5!4|?oFSd8smb3V^5DA&la?_GUTFTH4c@(%YL)BJzh@i^Sic8I-1K&rgS}> zDU88fQDfmyXcMG8_=2YcSb%Y|91R=4RIC3zG-%eV@;I_U4iF z5*c-U4h}=3g84@{-BI{5H)$VNAaQv4zu>;C^_j!`fR7D!9LNrRc7w@CzI**794%AH z%3PFx*S+wdUdBRGzw#q~`48;fJK|%Z(~j)}K#Sk^c4Pj!_jQsCB5oq*(aLT&05%}N zjS+-D9*!iDeji0`9$OtpfJ@hue?0HZSI-m>oSP1&)zOlBivHe~tSdEV#eIR}T3?^e zw?u><+-#TBzoM;DEBdYF+2S&M@rGVf!2@c&>37~T8=G>N)%NgA1c+qU)_dxrZ5eQ@ zH$1PMfe+m3H9U#MLfg+^qr}4Q*~@=unUfIU6kKe%5?Z_gz@%^Uy;upC zvN;N0o=Prtw(c%QKl07Z@2KL|%jDqxZETH&>a=>j9Pf%#MJ=HE*GUdutO~d_mgmh= zEKbJK;g&6be7?M2mg7Me$j1tTWePZ!ocJ$Sfe?jn{c&6Hc^tR z9#8qmILcwKm+0hE#R)9xdGb|IZS%Q=6@wZqA`+StCtR$a< zkzh37CzW3GgBR@kPw&=I`;}VLhK$^?EF(3xJqxQ{i?X!ulpp1xHtlqmrvL~K_f7nZ zirbI?nw5;$BVdLz1RP*Fz1Ejyrjh0l14y^5|f@DXSvgxBl;T~_dVc#(>KO|l` z*z6zgcH0<9OkXx?5Qw_N&fKXu-jGSXsjjZw%b3*K1?4p`(3%+~Bfg3+mj;zl)t?uAIj>UXdQRJfCvaI1SJw5WO5Iif?E$|)q(j*mhl z1M_3qfQo3BrjK$^#`4m;=HmqmUc%FNqr-cR0UF7~RNY5PzPFtz6|40%iJ9s8o~*(N z8USiIrABmOX|FkArXFkkclhTo!kx+=K1^>x%bdsp3xcFKn*%B0^hY-h)_E}@t1k@` zx60UkWGyOIf5Yo}rnbX-azZVQQsWh@2m*7Py*H#N*P0w*9a~GedF~FxH~Rw=di4kU zEs}B7==uWn@_&ElN{!Tx+qC}O4_-Zp3`W>=Lgxr)XjA5k&R_xd&FA~Vyx30>BTUO5 z)4FlrxYo@`cE-{weYSfWoxn+=R*<12p_Xcu2Qm7_6_GuP^A-uPST|3^)_xs@Ngh1D zIDZ|du8(r(!yyy87~S{v4@?=-Oo=*Ddj_nu?3NX*Ap4!+!c%8Y5l{LKc9cgU>hrRP&>h*@mPLO3T44V356nH&zRGJQo(UH(Db7L&|Az!yURJZ` zGkEi-iDI&j1|eaZbOFbgtUpb)5Mex$jI>$jmQENHj-sP|Pwc5NhavXyK4mlH#|1-i z_0YFvcCCHuKX{4wc!|Gg=ZYLT15v-3Iu#ir3fiIV8aV>tbO28VSl5Rx(#My6zkJ z*?V1BF>_Q`R3#30;HvkZv^c}yv$MIeg%@U3anKStk2>al%P10BlegH)*-1cLPssdF zKP%^1#z^ACg_y5sO$s`htU&@Ybt*ZV(%m`v1cciRu!y6B0Y~CyW>Xv24$y5iUp@Ky z7=+>n9(AR^eMPmg84$00vEVS@BBmW|6sKIC57KM3eV%8FY9XEd*mgBo7(j{vLN6`` zs`g=tvGHSx_gdf9P9o$JbC3z@FG(z(=84KVXJ>w2K#(^ljjOr!0ix}79T&cetuM&q z-ICIZcq6Ex{y>r?>&ing4n5qBG&*t=`sYGIA~tnn+$#?UA_|(!2&r7%d1kvdfIE!7_{rD` zrbyEqqJwnHvQ^gZM7%fiog`m-WTsU_hZj!Ua1*0k)^JS+|DfQw2=2h#L@kV#h`My1 
z4*GFtw6}iydv>!ZT*7NH&GbvN@Ffblb9U{Q>ByKf%kJi;sVXRmfO}0npx_H-rUHDP zWrta?KX1gGe)cLRm$?p{$S^+t2gQh)sNaMyfcuOwJuAMt=Wg&Wz7K{Son=Jc?a?x*R&F|M?i>AjXM%_CTIvz@D%pF|ITnuu{f>{J=GI zJ8iDN?7`cqMMfI^Aju-+SJMW&>q7Rs+N0Wm|AD9wuJJjq?GBh4F_qBg6~DVKp5?0Z z<3CAuORcczulq*hA*=Wp$m+91BAzDMfAY!H75qHb!;vYB{b2#7B!>sj<3r}}NOpng z=yftq8!OQ`53$mq?gP;vd%@xc^nE-%Ou4oj7Ry~nIlJ-iBArW4=hO1 z(&5P&GI}`u6Bpd)PV_atMM0VP!!IH#DlS9YEf8@O6ZJ$~nwjxhfo#W~S#Mzb+SO{bJOZ(i}%eli*RE<_KW|>U*g%tnVQrQga-U(PUL(T{zo5Z?tz24`C``d?om%d zLYChnB#{Nd>=qfb!lr%S{)u^;}cSUZ(_Q zO_Hp=>c=9fno_bOEdEtKooah+FB|jKlJ8m_4*TaW(^Wxh@SoY}5-tICZh?1_FHfuS zL?<@(9yPcA+aCj<_*gBv-RbvWuF^EEQmh}XfkLwmH_>-75v z@;*%EkI|X@Q|8!@i!9~3tsr~{6pfhyiXW>&ey>0M8r4oS?>)YgroTFQerOur8+|!j zWC0`iTK^%&6h)y4`-xHGwNw_)bJFUy9)4~pLAwQ6soRld);E+GeDm%ibW7jway}yl z)bbml55shQCb3t<^CH36M+gY)AEV^kuTC9NK2c3EeIp-zO49dXW#fB=Nc!b+8DBn2 zh~42{VYQ_aRdv!Z!KuXRG~Uj?+IHdiT92)AXw&!TvQmoGVH^5%)7q>?pGZK9%jfa; z#S1o?F5>IpY)6(NO=kCf?g4jqPKh!`aw+TO$cux+$sfjt@pQWFM58fZ@e^nAH8cJ^ zb*k(AZilhYa-q(#J&QM=pfD;XLUNF308mm{)|U6Q_4$NGe|ZiYDn~cXq>Q~ChwKHQ z@6X2@>K|xWGRA(iWbBEeo)VKGqIF8Dmk9{)W50blaBKa}^q3Z|o`j#|Ppe|bBN<5< zjP>Szl$3OhZ~c7vp$+2xp;~9;*Xqk;z22ThnSk5jn%~ovr2KNweqk@-ZtDEv0=d+N z{_`@8n`9q6nZJTx`|RY*o0!r{oIuoqJ0)Q1>u9BEOQrK(Ly1_V0&j=V@xKQ5?E`zQ zwe`lj|14|-Uxs)TJ}OO5_Obp(q=|#wtjH6r&A+Jqd^(aOe|;mIoJ^XL9Gt7M^Oc*7 z`P-p|&FwqCivw`JsXwno;)=&X!ZWmvLP%F^IHI9d_`1ja`P9H(d&tr8h$pE`n2>~p zVovjSbsmHcPXaTx%4`1ozPUVu9}mr)Wx4U?dD}YG*yGRIUEUA-EP=a?GqFu4nLV3p z%WXs&CD~v3V!B6Fw0_P6s$@bITLWHL3R_`3zkY zIPGi&bLPg@VvHLmw*Krpf}OYzOUE<8_kJ(4ZF*YrbCbFKB3=&(t#vb9Eg3hG)Pot7 zs-UCcn2xUH37$8?rbBn2?{Ww@Z#-QEX)ionfx}g zn!V-4J6VL=c!l{_<`$N8h|1rBb%pJ}P4i;F2NmEYnkT6pN62dsYAaYpw>4Ei0VlK$ zRuw1Dw^Ligk1N>P0J06dd`0%O+kHT9vY3;Kt6Mv2O3om6=J4js#)Kf0K_zS5$WOTg zJ*L|3n+-F)T48&`8))(3QDP+s{iJfS*9_tv&sgrR@D9#NDSrh~B?4fW@EEMWo+=+7 zTFhLqUrWRcz?{J3D)E6uJK|?G@8G=z7TZV+ZoFc zxFP#VyxjDQX7MWjuGc`pR)x*XaI|0iA&+-lu@K-9*e6%G(9jIO|V@n(t04S7Cg7bFXBJ)0HKp zD0lJQh0})03#vZj@&>@f1LC%)Wv|EoKd#;~D6VLW)&&B=od5~$?(Px@PH<=%cXti$ 
z!GksK?!jGxJHg!{xVyi7-Z^z|)us5uk8Y~>UcL5QbB^(iMao>45947NGVw5au_2e< z*d$MR2^pAi@|)% zkvz6hZR~=)?&^vn;pQLt(+`VdFRLmlI1d)x=I5nrAEw+XiaE*;PyB$8ETA*wnb-9U z1hWbJr;qo37`uK3eCzrL@!pSKkso{b+Xa#He;0lg1!hhNm7-Nf3Qc}4&s}}2O>8g{ zc?gnX7;tD}XiyJrOhY>;*B1e9vE-A?RXQ#A2uOm1*V6G-LLM&`M%tJmL{q5rLU^ZH zb-U-b?Pm{UpJcAQ)`~30X*8hVGDvwV5HNr6^0z?7)wQP1qPo{eJ_9V3PCIQS9Q+;< zm_Sx8j(3kA-!PQGK7{P7?X`~|SZ%n1fy0A>{DcE@2=)n@e!CA#O``@j8quk+XmsXR zC@pz5QGs#pXJn!2wIvO_>w*;CUwS9mTLiHr-RL9kLkG?FlMSfS{dedE()NOu z7vg-T5NxEXz1|7;!S>e9f?s4!Yf1Dw!Vx9gyGNa@m|PF%g>)BIj;#T2bE&1B6ATBG zU`&`^^>GkO^%~I+i);L2nMSSFAJ!H=T+l2I!)h8lD72$XM#>y0y`0JMbS1RIF7v+` zo5)A=5VSt>>^6JF4pwb=85q_6VImjd782m*p!ul-wk72M7k=A|DFzE29um%V|-1CNkTn%LzedMzLz@POnb z0mt(7uRr-9&4Nm(r8sC}7rE8a0#Z{CD0WK16=J~N>9U&=?vs@uu~sKg6a{ouER=)^5r>-2cezaxg}U z`?Imtgg~je&OkfWGDhxT=(|a#)mSuB6OQ4^VLM0WO)?F~(?--%j@Tueft=+S@`e^* zVfKy?Dja8esI4;$Y}ii=3z7(U7y|)_I^c&16n4)oiJ4>+4=vt~!bN-ux|Erzx9Sl~ zW}LE>G^O*08PxUTR2lo{0dm8km!;b;F+s~|q7=r@RxILhy_5#~FtMgy$XY7!F6!V- z;zi^;N18atTyn_MLs=xx72uH0E-iM8*?HUjsb?HM#f4MGl}x6Lh+uEaV6@a;-2bG5 zG|(posPTI#dB31F0_i#8tc^AeBdnN%m+@M!+TFkMY*RmUcgAqb#IfZra@JI>(C0Dc z6^AEmls+pmp++n?XO3pO8%n~I&^pLjm+ElG!a{S{m>21p(ZVq&>ku{BpYzK&Q1WH% z!+w+~erU&;Hj>%lo9H!Srk{}oX0*PROi7b_EoB1>)1M<%nkqznS;EtjBsTsnr$Ji3 zL>kQ0^AcNz4vQ^LX>DLcE~cep$4UjrLHMB920psI6gDn?xqE)QC3*jo{dyVuzL5QT z;rE=7{dO_uOt??kW|uW~Rl)Z-(51Q*W6F}>>VX2o%g*K{<)H_WXacLWSLx)!BDiuy zE`F!=( zWKPRSW^D_cqrXP8q!UBL=;_Fw928WOTtEpu2V_Z((I|S7NmQ8*?Ol3TY*uGsb#s(;F)K%Tq2P2OZ^Y`)Tc&n zlII_s=u9fEOy5L3f_I}5qg5Iu60wY18NSM>RcR%vNBgjvnH?Y9lTSP?|FgJLTW$aJ zmG(#_t#VfvmbN5cN%~?>b9)Xh9PB};40K;&S_%x+>0liX=AV z!&{34d!{s07QEoQyhBb>7Ki!A9D#u&TOV9U)r=Kw*srF^jsL={ZaEu70YrN2m@(c5< z(7!XvB%usOx5*|2(cN|;7)NVOw9KfpiwP8Ar4bbNkP=~HF_Yi=`eLNw6p|T zUwHGieB#nv7bxOGe{ZA`W`f|g1qhDcPl`84QqwZQAl=b_(@Wl^T2Hu}oS7jF$tX7P zi0k}X)b3@iiV8v4a3?wEDpqN@rdkaM%RLx6?(P?a3w}9|n*lM)T9liPD^X!K;p3b( z;ftkus)zw@5k^+6im2ETRfM}yAoc&!Sy{Tc>=WfbWu*w6gC)WNG>$9hw(V9p%WVY}e>nW3zVUyy5MWomV1c}UCA&eznH 
zDf_#*la816hY5XfM-P$OOg1>r7}Et<|574HGEx90@E+4Ggu7XtIPE(= z)SQ_t)AVgLRFuUK1mL6w6KL=rj?p#POEGck_=60WYaO%4&;o;zX}8^MfiwckK5V`?g~nMsTV3dMTzBw+pEnBZPA;5pH5H9(oOn z3yLWHRGT}DshnRrc+1H?5Y!7RP>6c+Ziqz3nmHN>gA9(Kyq4EDTI|OFYv?k1mz3w% zwU8a$w&EZ{OOVyb%kjvIZ8pjyh<5iBt~{WE5elKRlku9>_%)|F zN64C2VA*Y%a4t$iFP~D4?M3{IMm-f-F+A9aQGcugyYz$FsyLk_A^mP7#Y|l$(=EKf zN#oUOISZ}?b*{C}Z`sW^j6Valsp(37Gz`ykGW7sF ze9$j4Fjm)OGzRKZ$NQkjd#|fE77|_FPf{4#K*C^!%Y#*w9+~$|sNW~Y*$})$Bcz`` z&vmZA2Qh#voG)I}`}>r(xK(urEdHE@V+d8)q{0m63EZB0c-eBX5AgdmD7z7eCW9FM zF^FpmgKhX`nw?vLGQ!I(nwanalP?Q{0ShHU5S@&FdM#X`t|eCkA|ZP*MG@u^kgYGn zTvZ_~K>wjKG2#8=B>N#uVA=NV|sH| z?25krn$dL`PB_9a+yra&nvBGXg=2Fxif8tUK6N8mD9x&PbXv#Zct+VJ@tOjHq%*#P zFVxg2Ey8`L`Jz8n70OFWW4;>PU-sl)mexsqY}Nik0sVe8QdTfTq;MNN9k&FhtL~YM zd|sM2*LW{F$kO=AlDOsJC1gkJlyEIqo{C~GrkxeZg&a=K9%TC_=k3}b-= zZG)Kjc|KxX{NN!n>j8zd8zRs&(fb2YmS!eeeTj-N%=cnDRTbW=M$#Mt0kl&^mRK-w z#6V)H9G3o)^`}0tY{}NFKqYw(3Wpa1(c$hITwnl47(5@bufyIvo~E6sBy^(LRY=$K zO)X;taHQz+ez~}D5??@p+S2Y6)uJr`LqIC!0EQQ-zw6}`&L&Oj`A5th5o^t3@L5GN z!4DM_=|Zn`vrW)Xn1G$t2zf+x*Iq-9$!E5q%1w1d6y5yNTkuQ9{>py zc%ye#g(zLVpz38>I8IHW3BhzLBX1-xr(CRgbffUtG^PnQOBGyr)1pJTJz$>9{zy1U z)}!&0{~Hp(XIL_t(&5<^y6vljBA#QFZm7~av=IncZYKgmDJOWAPh}4b%h1!`*EkY!&Xb{!D3LqPWFldbp52cIGN@h)pEI!cB>_M zjC0$rEyjnFk7TUrEWSiuy;thc21``qfpi&!n&K&Ji2Nfg1+D8FYmYn0lK`xuuwiy= zySD5wMMw=BrEzv4(K*3lE~~b|LjLfd?m+#P6-V%OWzOU%MODe+A}g;lWt%N85^UCe zt1d#$)M1EDO*fgB-lFG349v6O{`6zJKcHT0p#&QLu2h*YUcW~;oc?0^J#2%j!DJ~~ z_|~mc+WIR?YTu#z7TFcb^ILMu3ng(M@4MO?e@Xiyt2<|`+!Ju^B0!;yOxKNeALGrn z_Q4QMq1a7xa7TnJI>m$|HlQ3Y;B~3E`lMeV5V>Z}wXuW+@eg;;*;mACY_%#`#*Y1F zlxIW}|8t3`$u;3j7>t13p8wZUB&;SRd-s$1sUyST*uhxOR@Ow;{J>sFA>DPOU0oqs$W%-*^i8y-t}Y{gfDN37~sDx_gyM;FvPV5&mLVT5f4u zacom&A0@?vjUKHaCc2AJ-pc zMh_DiKIqNw^7S|QGxw3Crl0Fz^0Ht%p3p{?v#xowMW-6I2zQ&m)+(u7dz1OY*-+nQ z=UM1-$>g4F*_Yks_^RU~N!m*~_Dx>u;16r8#NQ|U^T=a&=QO*Nq6E{|k>UJI1ynH6IOCqK zKjAV8V)u<58nnf7@uyISK`oSK-wV5UWXj;O(8JqiJE1Q?jyyh63{yi-Z_Hs)``3FF z>t$g~%*p)m+@3r5xE)e|h!2)g-=)8JFDDsnf8NvRcNWZ}MK}72TE~o@%EP!>y6ig_ 
z^W~Vw)nmWB#G@iOC`_WoyRy7_C~-70*%C`Fa8OOAy|`5xcxKc6P)vQ1eRZ|E)-8&4 zU`pv=d}dI$)@5tpbd)FTs@3dw4wmR4LGgDN&bjm7%oryore$TiZI>b?b@;|&n_CHv z6E-0e!jnMM!|ih~&{n{2Qp? z!+4b(%E#8U2@B2VUSxKFV;r!d;?;9?GXS z*FVtSWNMo<5hU{+At0p2Q2RUE?@R>jc$P$?+#E1O%+=CD{*w%@B$@tnmY|pZC=3GG zPbI*(TELz^1N61KgmE9&k&DxUUpB;ER3Yn{PZWl}Wr9*~pkvUq~U8e0AtfGheN#-{rLv@+JO`mdL z*WJ(1^v&z?Q=%m2@k8Q@cy$cawZw4k*E{X&=Ui zUa0k)scmOYrVW3ZCx!VPCo^glYVQ2_YjVeN_~-s^>o+8*oVx*0S%J}&mxKn{&nH`aUOupucRu`6{L zk8?Z`ZmM#Ks)L>8citnrho!dZNTu+s#S$rqE0SRCcg4R}kD>ES1xk@l?_O3E+6#G~ z(2S1P4ll*VjsKR#K1Jq8LnfD+X2$K@C+I+Jm0C#EEmZD)97%95{C0IT9!3EpZoTvZ zv@oBRl8!Y=1n*iBmkRry^>yEGj>geh^hvVyy>V$!DxMsb+%w8yeWSHNjbpyxLsplW z7lBsVwS(<%=Cqqi^vRQ-D?^_oc~vj$WdelhAGia3w=#<;~+Eq zCgRxBT)mqrSh3s!P>OUsE$cb%@^(eO3tv(Z4xoMz&H8|XQY8K}wKu$BcuGObjxg|7 z0wv%&foiq%b-UG|SN&xQirZ5Nq!?{zG*{c?1BnyDlV$*!L4urP$QdhC0YWX6jTp$A z1>}P(;xwwmZc&eobC?oMG~wcdU3J4>V0`p=;J0M?gHpavha(oJ%u0ysC5I_lhn;?N zcgj%tO*SRG$L$y>tFEhw`V|*8dl0o;_WK8f4~$5&8Xsk?T5Osv_52?2jaOB`*ysNMcc^6dDMbz$L_~P+_ak`w*I{@4S3rd9Y4$rd?H>m&>OgRT zGoP%bu>uY&4VDf|IWF=eoMO%jPB@cL@!nd~^`C~+Pm+CPP+MbtHw|INfuzlA>;{c+ zLm5**%{i>1JQoo{&NAwtn*)K%CBy9p)F~L<^B#m4+k}$-7|M{Zz9eLv`YHjEjh9ht zE8?Znv~iN*`+PKaW*=l1g!>$vm{2h-Dq~fDcK`S?R74cf^l@DTRoT3abI{oT)Eb@> zeAOJNmn~TlsBNS*h~uEco~7KZ4TeRuhCV)VQvCgUGkOocX%_ z%OHOE6O=z7Wr`p^BX#ZX1w^a5v$x`6Yq*ZOF^lU=JFTm4p_Nvuv{wTbP$BB1>U#ZD zqvCoDr6KV-HV%<;SOi9x@{QF3E9wFcm~c2X647O6K%FNCDwyovFi=&;Z znjzeYV>h|@LI5okfKu78a2Tl zZs<_&tE5)MAq_rNxMh~qRxD&x52~G2!E^RXJPd*D3x>8l>v&LF66rsa1dG-?e8$68 zZGDrgl?(j*;Zq)zFb5156al7BIs~dpO0fF4=eVV`os=@cI+ofDR!orV@BXbPlLE{A zeC7E02j5s(*|;fVJzfu@_xA)m7AEmQhF(vb{Z79r zu_rbDo>Ia_{`jIc!#2=YpRuVOVDSZ+gJ*&&WWLZFfJDigM_#Wo!rk2^wg^) z^v-*~L{J*7br*%RHO^s0t=T1$U#3+hyHsM0Dbjs{Bl~i(qVSoTTOqK<)Uex8EH+Xl zjm6UcmpMA=5KaV6kZAX@GuVh&%5Z6mReWL~8-%_#cC7!xMWa?v@E zYYh)6mcJoX(=#(bx2@Dm}&+j z@1Uw1J-I`k_AP_xsP7lKG@F?+Ss_rYyv5+=5frq4Wn0Pp#)w~n2!DvSe9J=t5k$xI zuIdWg@vvDEqA8nSSl8CamNBc1r7;hn#1Q{Q3dnoDnf>dqyDtrTt!1<)Y{AC{lY}ze 
zHoPcOGeOcHxvcgfsk#3Cy5z^1g86Yvp6aRuJ5y)>zFFZXVt`+%Zyemcc=4r+*eQTW z4UB`L=3}S&y9CUrgD2m0r5`+l)r=RP-S&DLj$2x6c;UN*`? zD@GoJRt=o%5;ac0kN7_g{=+rHVi1mfeu)c;XEfy2ia=yjfE8609H>ii4#r@sT%p*3 zQ$IboIJL7SpK~YQfut$1enC13025NA$;J2hiquSMMcR_Lsgol3Hi;t)C(o8q%$_|S zob-&T^qf0qig+twNw)2$+^(`>0!=G?&2gr^gVN0MOm#bJaax>*)W^b_CAh8?=R1Qg zQY1)c_$SX8>(X*b^`VtHJJ)BbpBm4%mQ+3=3Vt%`uHy)v9cTWNE{$D2N7|DlLuNPh=EZStU0kjScT*@&F=?81f!<-7)gLA7&gR4>)s{BpuwWmah;XAWp z8s27kW2n}nXi55u?r7`3h)Z5ElQCr$L*5_9y@2CVrR$~NTW=HdMyE(qZQf_I#r?|Y zV6=NeCCV+4x6|jeDuGIHA9N>uG19Uo)p|1wDB{aHh6hF~|IP2}%9cXzx19z3X7A%S zS#B`)O}2hboQ74sIY{< zB+Rt&IG0{+;Pj_uZRQQ1-}Na5_&Yc$8v0(N!vR@s6xi6#$fY`ZJmYBwn<2(w);+n0 z{d@g=^cb+zlA;mej7TCruZ8hdKo?9`rjR2fulpKa0Dgk_b04nHP@rPAYZSs>I*z)fD_pt+bN~N^FxrXru^kZfylw{)e(2%{ z^A{GK1s^-Ius?Kk*p3s%GOrQSe~MJp;os){x*!6C3x7enspkenVAboMf~_g?heb<- z=bukb3~B;D;S))hc6_^ZKwz_QK_dg^IpG5Y$EjAr&S?7=?i!P`1Tszb~yc|dCQ$GgO+_lnuvHqEwZkz zmYVg$7E6$Gc!7{fly2<}pk>b(#ygLSq5*Wt%1N+qO74ThUppX%!e6psKIM=HWP{}^ ziZ~kL0R49Mg7?FVa1ug_j8YeSi5dSaof_)SB{9Oar>oEGp)6v=MqselQM&Eg5 z?6;UZ*VLk0k_{gcn?|7;7)gjvtFN(ez+bx{HKHN#mta8=zAhI(m!rO|_46!&J@*#k z$uXHchvxE~U4yQ|9fH_z5h2_2eC7@=DcUQ&@lEym!We|l3~(uHXi4x0xv~h_9RdGugUr3G}!@#0#Z=-g-NTMoV30&znfrr#mIXgU%3|RkhLvKyB5=K^n?i={Xd~!9W9iR zGJzvUEE)6U(?L|J8;yFd+X9iV5aw$7y$V%5cq?V!-ku@5L6Rw5RM(;+1L?~bf{(d# zOH#X@+I4Zgf;nz^x@=gvtvUFU<(5pY-*Ru@$izg7V+vQuGzu8fSk&l>a0HS_+_ccz zL+}^_S#;WFRP+V@E>#U(Y2FhvRuv(TVP-dx5eqHPsH@Gr-<01o6fGWoUCc6%WIA|C zw^bm9qp=eb+0dY`*`cMq(ZlOn;|qS;*i4>Co2`$*^#;bUzbVOWW76F zN^NcS+W0tE++QnxftKb290gCv`XW30Ks>;RStX5l+1p{v{2Nn_>i1fuJ>z7jUIx89xY>v!J1lv3zs z24$Y&*1+~rP^Z7$O=~h((bc!HLqb{Wj+50YiCl8*zQ~H(J?BlD;3tHRUBhc z4a8hD{sc^@|QdSX_^dX`S}%@4n~r38I-8DZWZvo%{Ie)yQ*lwK+S8GQ1)O z!NjpL5sH0riU03XAX|UmHpZZ{xH6MotIzrJtKq{KAJ9Mm_O5m(U91o-+J_y@?0w!W zyk-~b_xG1a{UnzT7=kmE-JFg6_yVSWWgKOcr*>vg)yqe67i*IWHe!M4h1(ziVDh|b z8S@pvF??ggpEM(J4zd zugl#96m)Rd&;6D;o3m?zUcjcPM(?`B|C=;wa^l^u>*=D6CF9JZ#NNK5juC)kBeff* z^JyvMC*kXm z?R}o}HE%(U06Xyvq22FQ>u(b+31O`Q&-~!uLS7lBTKa2VT1pK-&Rpco^{9FWJmFvn 
zjGp?!Mw`p&IL9Q>+1Xzvxm`H%p|z=+dlua_CL9E^0;^^FuI8t;>*Dn7nyUG4P&wVX zU2hk4Ney0q#Wr`EzgKOrF&>}f{^6M*^xNB29<9X)ZYMPeR6W$`)Pk{CP%+o%xbFU! z>8<>Fvz6I2k;TjX*YrSZ*Ar%_a%&2`t6lGq4-U#nhduML+VB37z&z{N&hEt0mYpdQ zOM@g@7yEv&XqdZzA#3M@9eSVt*NriFdFViZQx1kbcl%XK3xwUx5d;pQy1BMr{~K+! zBt?+a8~cQeXSdqH&WX}QK__|?zXvF!BB!y}SuVW&)jL7wB3E8I^FO$R5&SnQn{}CE zcA&`GX{l8YW2@JEpC1zjpISc(tU$xHcYiMOe4O|&!~U%AsT}vO{F{^~F`T#EhaD#4 z9o~s^H*E28^F8^=i9RhTf=++i1PeU=7rfB&%JMrBIo+Q z_buFGHNNv}cMO`}&`wbBQSw4Kfm8t)FuHSiQzCR(ju;SBRj?MRZ$Yt&2=1C#S z!G0yOjQo7X3m`Rx5kz@S5}qXVglE@t&4#?b&6zlPSp5r6q2RsuoDG<8lf5wKIX5Cx zQ|IIP+lKpnjhJ{zXJ!w|O@OXp8Hc|5oxA7JkN3Ta_xWXDO#!hm__>d}t=Ze%5oE=n@(tq z+b*jdy4|0r7M(gBS7>LV`tzh#(nov$A!f zBsB=chVEF)3^Gw6YL|^txXvYUnZ_A(mygJDTUVK?07g*s0qQlvwOEof=9`+z&iQk7 zb# zEbAocf(|05^Z3Vh_I6d3XwTky)ig=N)GDQaE&<$*C8Pb1FLesFFTVQj)?Yno)j%55 zzqDd#AQ}sHXwhk{oq8NP7r#aRZp?ivsk>us!i2`V@ zIT8y#4V7z7N zF;?jid7GpXFu7N%pQJm>abhu}!>NN5P1(y2$97kH`c4AP@Ch^)lS5!$iB3Z;*9pH& znAEm)F<_E!-V%#UPhhqJt2S==>Dv(w;tp2fhqcqWsah>w6MP%0B>c=YAU7(2mMXor z);QQpo4|_R2)NPtGj-r%dmQpFh$QD$*}B@qgqfRBA%OvJ6B-l!{RHl-u^Few5Yxkf z-mL5X(G#TgNSYJ_5!Ph}sC1nf7>w(jBPT{++ASISn7?i}$7AVWN6u@it3f1zqK|Pn z&8(sz0ez5KipCt7>zAx|4BO;U+?DiququcaA2k&2-=)b9BfB-H1t!zIxCAMF745sL zQ!(Z%#815FVS&JEfZJ&Vvwxvqh8aig-ML>;Wy1k3J!NVUr!7-zR*G&)3V0}u+A%AK zeuR9mb-CzY@>%S6q=BAX13&Xf%NjYJGqOO$0rTfJItZV)P@A-NWe{aefOR-V$)&NRGdu7afC^Lt)^k7o;;k~diehoel z?a7DQ)>E+hAH@uA^As@uA793AvpaEAW$6%y$b0^kVt32c{c)jd`x)LQe0G$AvNH1H z&=?o&QlwmPwMygUHm-?EQl`7907x8>qY?l?*!x`tjnocp_+1Ro9lMuIw($jDr7U)A z5`L!MX1vc}9Mxh5ofwFqrUQWUf}yf9rwN5#(Fy8d)L1UJK?Q8b0CmY4nwGz)GjB1B z_J_EJY$mDF@syHrv<-}-PT#^x;8|iij7c7Um}yrRKVVK18;#aJSJ6|Yw#v`ElGD`4GW_HoV)3@lPPTjcP_@nc9LOtiqx?2B)^tgHClZ zogqb8=TRBDD4{Y|p0%3{qrJwEV1sHYd}?v=TTj=u`u(WEuWn$ZT{BlF=JJ<2zQOxua^At~M zWMRhav&FR&%lbE~d#HePC1Um>E7I@Q&yoo1c+4YONwT_{DO}f=RqPz1~5}xnc zJjx}DoZNkEj@O)SUL!oHLgT28GJf|f56}+Dd~JM3t9Y~m8|FHb0?*Seb@j0Z!H?7e zNC|H65khpq7oM~4d;22HvBCSv8gEyp8^!FO+s7GlKPY+kPwCY4AYR)MDn()bdH(I) zr5d*jxpZ!Vi4fSo=IMxEu|Lb&vB70}3ga-pOIAHLbX 
z@_Tr0LhbE~*>`VPy>0&;(j~gG>J$kCs}6V6_)@Hmd$p{@VMzAY&onfyxwcMi=29d#$_s26W+Vu9TESl;b!8dOA{x3F;JRHdrxMY;F<$0zP-23DA zLFB>Ri6-?YR^%WpnYirW9;WK!JJWrA4=`A7dHi%OK3xRdO68Y~tr&6t5{;R(`h^Ge zU;t63Db40=JRup);>@@6n8o8idm$HbIhsRf+C)MZhuuTc?|Yi?poT7~j8&}f6?H~U zc4wpyYZPmNO%rv81F}W%cB|uIr$DUOTgN@KjdO-YNhOft8aNHW-$9bXyjT2Xrq%3h3_tTw!pt$Ew42yD0 zorA;B+zqFn|GABuj|F@l2~aEXu9E`CL``)L)d}-|!oO+*w5~ly_u{Pxuk}=lzjyuceYP+5s5$EXO^_z&ULp0ShN54rs&p^$Cw=EdQ-LaCM z(F$EV7thJItko+B*yTd_<=692*8c93q&C0&N=;Dau`LAc@}FhnEzzcEUEvL2Io;ou zGhX1UTeERY=7Q_Z1BW zBu~1Fk$Wp`rp1q{PRYIC4gCMyf{*vYIo~gQ0fe?giH8m}igysgO@aGgT%NaJZMS5B zPS>;9sThrcl>uQ10Q^5FHX{Lm-$S{5|2*JN4!ioXT(Ld1>vLw^92I-FFD>+H0F$_#&N5KcBC7|08hR@$VhpIfMS_-$Clsl9oZp zfhA;ti4FBI(EZ>z+77+J+XLb_I*WfPvW4!{=+na`G~7Ldgx%Kxubkw@fd1Ht7E!B^ zS6pXH2$jX0{r@j^+aB)! z{R{o4YB0KrsW*ea^8d*^2%`A~JDLQ~tFN?~`u!8oimCmAyoDPH?|CCnv5GUlQKZb= zzUVleWb4IueiDyLvO4EC`Qr1xa5!ER!78W$fNA{;y!+7@Of+rY?oT{Z+qlzCPlY8O z=s0nwq7VI$ATboIBF^ZQ2vY7CGRM{|bc!T1TgptL&2^EprI&bv`wKw`mL7gWm{sE-tIG~sZpAw}>(5@aD&RhC??dM)bgms7Ref6X~P071} zvp@J`ku@Ity}a+-aLfO_*8cYXLC6LZp1c^mbIt|1WwOeY;YtI|cB!h@$ogvpI&ze! 
zVgG2Xj(H+6U?^_puW2?s&M14Nc^D~S2X@yNVQ23=ssH>3ixPF`))>RWNH&pe=orfO zuLz{CdfSUPIWR&F-4%u$H)2wR2ia)3*t@zyiZj*xU>p@@X{kq%3`6y1Jg&UnKU~PG=&yIMG!GpIFhOkXgl~nTmYn_WomA>eSypcC+Kk;A44s4UcTTrG2((us*}P1U zIWoHLg$*0)Ge{*I6*sn~2eJp$Bm}ayyFavYWH_s)l%<@#N6I4zrme?o@w$#&7R^#K^yFZKRM2{b z$~v`#T*YhiMsH@2WWePIlMY}GH$)*~GMcw9Cd$wzdzSDDhB*nZR@@*7yYB?RHATGe zG&!}cNl$oD2`z6^oKahub2@*h@AW^!Gk>RNP*y*K6~&)nYRtz2#YBiSEIDoU(di~A z@NBHa*g1Bp&R*TAvh6#_XLugOkPN`Cd^pN78ikg8T^R_gN#`!pUNHf42H-&Wxd-daQ=-QMb z^Wm)~AEBWHW8@$<;;v2Bh6P-}>Hi`nx>e2JF%6>N_G`=Nq&vg z29B)h+rAuImm6&vI9R&c+Wfei&!3Y2wjyqXIJKs;F}29b&*A?3$3b24M(w+~t~wl< zi>2x9Q&4W)A)n^pQ+t-C;?2sk>P6_0>>6v?$mrY2qpA*qCff!{I(~WRo7MD>sXuIaqH>yFl0|t5fzy5nsdJPM6cv0 zaPYF!v55!dbk&sQb#ykiGZJ1sW0l^lPLDjCylfs&?pKgjt-`Ipt`XVZasdbKbF^p| z_j+Q>vg)#*Mbk@RJJ<_n=xhU*q3)lnCZtMA8;kO5s%izXqJw9aPELBJD|4qFlhg-1 z#PHgBk8VA^($c?J(_7Q$6`3jky(R)8lI*OFUu+&-i+1%Mru7wF`IWi!xG1oUX8j3= zHK{Gn#@M5my8{ErkcBxH?pgK3WXaK<&kE^T`t7$CE{3MjpDg*n{}505SS0nqs#3;2 zt`&l*6TI>&0 zHnt_Fw$^$vG{PM}yzD&pZaHXTIC&@=P5>M5clpUGblQPGIbEf#c@3S+xQOsC>mKY} zUG@Cz0{0F!@yIc3be%DGn|yH$E)qjijb@T@=6K2G$&eaG=VE;=2=)GcvZm(fD8;KS z6Un&#z@ul(ukMLwqHqjyIkmiugup>0p5UJFjMvVe7!s70&Tdd!zq^#-|NCV1e+4!S z#$ZzX<0YGczf`gvW%L=p64S?%QZ@d{#AZxbv>F5#n?jS@dhRaoyzd%`oiUIF zMG~-46}(LXK<556cF z8Krqn-NZNiQ)5|UL3uNMDI93~vbmRKc2xXenc3W(tydz$o98-sfS7 zYIy(D&dz&FOW)YlwSrNC^~|O$GW6?Am>olmNa$R3v_wxPaexs_Q5>v)$U5xhq z%B%X`tx-EPZvIAIBNq0>=aeZUAAq&AyI6wj!GP|2Mc04aVHsolYUVW<}b2FBed4f(Ab+?7suYT9h=;tV? zZK(-_0an06-uFFZ|Hld#WmLXk2CGQOc6>kPHml2F?<&*lMKm(_`e@OxN&{{Qn4h%cwZIu5AzkK>`VGfk1Eqjk^;(IDy99-Q6L$gkYWEZjFV;-QC?i zxCM8Zdgi-86e~ZglJZ? 
zGz@t;KxK<-ZU$JA$3{meMX{=&B@EgJR*X?^6-P8em76Uc^fiJ2x>W{5l6;(b)SrWw zof&J{KkxWxB7>~$y7Mc=)!zw}D;`eEG>g@XJ}>|4ds$S*i_s8Su`Ly(svx1YizmT8 zz9p*U=R*Cb)#W^7o?T5b4CF^)|BEDXzKe$_b?kv^%p8jMLbIz1>BwGk()49dyLDq| zRERjhRKom0_};@h{jfTJyLXEV1{ZEAh%1JlrU~91IQL;kT;7c_#mdQ9H)hkzNNC zvbAeyviu!;Il8(ZjP&lx&0J}2g2>4t)&ZKhZU^&KFLdrda_{N zcF3B)5;_HHHWh0PnXGa$;cL01mbUQ`$B08%8+Wdkgs5jL&b0cxo}f6Hr;{pKV7*GQ zql1106OF%AAH<>&bPZN}u_VR!V>wVGp2btRfM9ex`|MIOz(+U&hVs>E1A92|dOamPRl z0C=293@;BcgmWW@3DItS>;>E@u>Z22^k4W4^R{u3pi-fbmz@0p6J+v?I_EMq`YQ79JJW6Twwe6K7f zN??^ksVhA{hp8))vV$%jLx%KM8yS{;8)hGuH(_ffwX-vV?$r;S+t$;$=T{%`C{WmY z7vCQMlASLH={E^yh)x-k$fyl|{uPUnTVC{az8{q{t-d3fEkh`DQGcjbQi!UNR z>Ku&3*DSdlLjNp zMP+nt2R0l%!6t9FPFJ6dDH19!@Q{7nj*Vp(GztL8U$0jS~xtK8lh~Lg8fukVK~cHkVeNuD;Op3*nd$>{994*H zn`|ffJ2ZSvLD=Y{Ocj1E`-_W%^zo+ML`F6iPK&!&ecS*x z3eDm$RiwPF7;?Vl;<%H=kn6Uovjd``YQzj}Uw-pv?Uz1t?+Qm2vV&4<01-bsD<2t@ zvnp==154Yx%Y(8S)%WCi{gZJ!_fU&zSZNR(e%|lwg=d)@7c)aF-;^bjlPeD_>`=>( z9jpZu70Fn;_~vPpu^|4Yd&U%;_G@Y@$N$U-4LqNZWM`jA%{vd1Dwauo9=4dp?UJh* zf>F(z3vwR)p9_U`)VBx?BV9*sd+soL3wDp9UgL}YlMPW!K!AL9icJ685vM$QhlU`9 z+}Oxtp7_Iq&v}0*y`z$Kl-D!5Pm}xyXaJ`Q&0SJ5VosJ$0%nuEqwY@j&ZXM5h9k5q zyQha*-7|x4k~Ai`V#OP9)Rouf!#3|@rantvK$|-OI1;3mmbXVG#KbmOWd~xvz zLcn1s--gfmup>8ga6WFQcg%iSK5w!|X3VuBj)- zz7pm@pCtJp7arn_*C6l;RU#WE7q=daO`*bF{c8f&W9`VhHJwXxYI6e}9LbI=`G)cP ztI?eng=Rk=1bR#e)d&Y-FBH1d z6v@aSoK>r1-VggxrLgh5*L`j!fQteocS_EvAQoCf&-b}wc(gfsjW#Xwt~}OzC$c1M zQZ;>V;z|w4JJOdr@3R{%Sw;mTH^I1FEEhP#aoZYH@yVs8*q@hICez66)fB8W&39cU zW_DZ_j6iX6{9pQ5+{`$7#OK~#B$u=O)itAgi}`fB8rePu*Le`W6L8fQ@~O7wzsTvh-uda^5A2 zxrK#OC~@PT8-l@{#rI7P?`b{EQ`~Z;KPD{P_nC;Y(4g!@v-b+VZVAS$+-{-%ZDC)^ zj1ktX2Tn2#{2@~;1!j?gZgpLeqAV5V6%Y}{c(uF^GvSCqJGt?oqG#nk8ToZ^1ql&N zyLiamva7Y=mOQ>MTNgPZAwhlpiZr&PH1`9eZHz18R@!=X4SC!LshOS}<0E`! 
zpEQ%qZ4J_P;49&cGsMW}sGIz?K4{Tkn!FKkdPOkCw@~=n&99RuN@CEZ3;ph~!+|AE zMmDjL>9-RCJ-V48kmE)(Lw&&BN@I`BDaD}cSgw9ro0wF>N4al?=16xENoj&vBuTwR zcst_lGyx#8o4#UnoNovpfjUz~wFBf;>CFmn=$4O)#7vgB-G{5YD0NbS_bLL=DyXn~ zTGE&$_{aVAQ9QtJo@DlW5OUNb`h&-dFZ~Oj|F-2{3(o6dc*=I^a2!Eh-*j7?stYom zMr=i@A3d1)fL2)R#p8Yk1Ye9>xXCZ!gVIx@Vvp$9`|z_Xt8xY2WEVBG*N>(h>paQM z+7&2a=z#i6#j%~G)22dqjNpP(9BTqAI1WK?Y*CAZpfJ<*&3GxzSKREkrarZmT7PEE z(3&NztfR4Zg(~_g=u@%_)0;;^$WUojtx^A6KuIMQOXX0=Q?kuD-eS=st{oheTj8pe z)8TI7chv&Yz>MTZXoekB$vd%tS7FM5b?d|OKM=<#`IQ(Uq1jgJ+s}a_to?nj;^j$N-AV-vm|1Ax^r`7V`3p>JDy}+ zv>!(D?lsTWnaO+G$S6D3%kPzqb`%}5%SH?O*atfm_$S0+8dQ0yYEs5x`chH-`O!{k zaz89JXzH4wn-?C~5s6tRhs1_w%R}!*e=#Plz*Empw2h7~bq1uUX^X9-nk&wNdzzv2 zIH*`sT22|9=u<|)<3UTdWT&hpxV`(OqxI|39;(IlgcZaF>Ua@f<7=Wz4ePDv(lb$M zH>1#p*xr6q?vx$;n6y*vG?a+i-7qeXfY#XXc)n`$ueooZ(Ti)q2U9Q>qjg!ay!C>2 z7v)(E=fT;>9<{IIODll835L(x7 zw-}DW_|n1i$MZpd-v^9ncAY=()gM(vXTMsC*V4^55wK97`g#K3tPNFnb9o8RZZezF zV&F*bWtesIxT{{i+!$rrfEkw;42`4+9)C-@R+rAduJ#$5n)ypw1tR|FsMgI4dJe8i z#J#`);5D-B#{$}AZ{K({$wf?!uAqt!$5zHA8R4|dG!uhUtFP*4*@H6V3cs9`RKj3! z#rwnEGjo9rI);N{z0sZ4yg-qKS+LqIYP5hfJ>MIwMO< z)ni5nyhMxtQ2BAYcG0P_V4p

NMzYqOaWlwRO~sSL^_6-o*!$#PIS7=9NEdo#rIpCCH*p$3__`JcnZXJW6D$2pvJoV5s*S2ycEy8s=ZMFf{S-g2R-I zq~bs7g0eaNm3>a88&X!0N}tb>`q9ckOjxv@P$-N!!R@l54u=`}_y^aEF&) z#hKplbg^LK4QzI-V;ZNrKY z2iY0QIiK1r2dyaR2a8}+vokQfoC-hAmVpcQ8`mk;FJyvYkM9vj8~&Vm;70ynVMN4(u= z&fWe(59*3J)HEY?@}kJin__(9FY-e(P%H~u#sMOFG{kTz@=qoUlQ8tqt)2ftmhBqU4@Po;WET1ctP$=UEO&HCxdWLhK=X)Yp9Q z9TY3{N_vr7I*<4SrXMKk`S6pdoSvR31=Hi2^+dy|h6uAA#>voc(_fdQD%RccH+;-& zmq_wLWd$+B$>GRZ8E+SH@Ersrp55asiB1wR=I%#=q`E%Kg(nh!@Xjk@m;5!^r`&e( zr5L0fYsAXMC3rK?Gvt5uaHEbhph2ahjU|CUlut?1%~Dol5{jvX6Sd8tVQ=Ueu~ipe zd6KgF$d9sdf~PM_Q@?K7@lnl$enONxy_n8EEm8r!1~5gA2$J0z4(a)G9RQIH*ZbgO zVOAC2Cr0%JVjAtJ2{kFgrvCD<>--QT{nw6KF~V0WAf2ZI=U^H} zDp4E`85tQfmO<^f{#I)W?#m(MFJa(3Hn^Wlq9`y`lLRoIcw1X4u(2WDUC7MJauM`= zc~gcjH(O_17F}LGU0~5QFQ3(mvYDqzx*1yHMo18@*&iqAGc~EV5juDMApU(NS|e3x z8x*LkD_csV!da_xn zP*JtLS=LcA!=pwW8BtbBC?;AjL@@eluK{3lCC$u+Ez};xpz_8H=AdO zYFrvjKW)tMmP%#ZFSN_-FMES3vj>umr53#xQ-oU_&22qHirzSC?~rJs$Bn2|dSKMH zhuag1D^o$JWq3E@#aHAPM>&ni0kmVv6Dc6mR^Op3^MyY5scx-BF}&$W!{$t{>n>D# zdaic48c=ji7n~)28)B6+5jTN)q-@h-b8t0-SE=N|ncVzff&1wV^QvAusw98u{F;kU zH#dap-(q~l&u`)Ztkfme5lP~r@fM2KvcTlCo5m|kXIwciVjd>9zdR|!m(YQ{G_x`6LpF0}Z!LHyS zXUN1a!+z{yXn3pB!R^_1F2fYD0EsY19GWjXRL-_$G;@b9uP9hS>4 zi>5L5vC6tRny_2>%@q^fkG4`4DpxvGw5*^IU9sxTB3P%PsrS64J_bN9(R$0Gdl?x3 z=Fj1&;Nkx9x~5{iUuuT62JXJ6_#IAxPO>3rngs*LVO?EY>;USOtl6Ux1`T32 ztOII3RABX9Jz?kOikb!%y!2mwXL1hgrFeQASwoJR6XyT-4;s?apwjXuxX$@ZhZejzDtc#tcr;(Y$75$q3S;IjJ z{q9jtS<7}3&XZ~!%`RY*0g)}?;NLK*?#g*GI>$kY*Zl|Lu}1e03&-zS2n+SQ8#Mrt zl!KTbR?*C9cL~@uS4!xC8kAofbD5?3$?}ku=2vN<>Ne;3PRR}z53j~-==s|7B{D`| zAvL{g9anE>hZGLG{#&7T#3tg{%BrHP$K9Wl9qa9J*NxD@xPju}f&qY#d3R_@I{uVQ!d*!w1|F9K@wPKqCGaNwPl4$9}>bZTWhKQjrN@GR5l1%Bc7SXR7y_twSL zg@gNX=(Od|!V2G5S(9}C(8802Z$o*y5gPgSvxSK)UE%bk&^;)D_SH?-EenrP7^A65(FfBH4FTFP{&yX;)Skat zDSlyAl51CQ;fp%NDY8&6l%6$>wV}U$qHT)hPy@#*j8SpOC*}VhThmYh6EQyY#R@qW zvGD(*tg1P5nTF zd+fT=*smTUnIG{_fg5i9aQma_k0V;MFDcqu_alXwnFl9UMa6uR%hI1}c(_3q6WxE3 zIv$3s&*T+{9|F+Ghg>|YPCDkd{hxBV^@V-WQA>&LfikWC-OTZhc@5Yx 
z6n;+^nDomjRDn>LdopT$=@x!GKZrq&{QL6q0vU4JI*#X3*to$F!Q?UT*+8(Cpq%&s}D;XL!`p5pSZl#UEoc zbsjc%vU&(1dq_jbVSSJ}>^0Am?zq_kl@?t#>S(kP7N#{)lJ}XSFk9Y{<5)cWs?xVL z#lMg7mqLtMf+2!D-Fxggld8utJrbc+51%O5E5s_p=iIuG~@8KMIZp zc9om(c+r(U@9oHg4e-b2=nE}npW1Thrnvo2OzEkrIixVZmxb-^88C%xPPJa5 zsbf$&>Nj`ISn^tACQBaQd`_HyONpw5)X?x#F>{h+AL1*pz{T2sxTE)7Qq- zJ?b|I)ZH@Xe$Rlt2cO?%i>$i;!-TUCs>`;WNB#Q!7sHc z2jxsFrDwmZ-0>()*lNGMa=EU21x z-Mh*O`@=F6$=|({-neNXOA%Bj z!(5wU&Ir*mDwSsK;D`0j! z==N*LDZstlx$E=IzO4RKU-LXQKDu(){j_G6i3yY6qVC1b`KqldJh(xUtc}_twItq} z%I{BQj>_zWw%WtkK=GBID^-}k!voDYjicL0%-!Zo7w_is$o7#}SJL;pqtT`tJ98=9 zgVKvK+iSnwqk&V>5yma1G%RDol>TZZJ6$ioEl0bhN^ALY@&`DHj>JSl=I(oV6W@>r zw_YB|g^@9rwjW$w=>y_+p^+muceR|8)q^84?lIptw?dPE*^2ryU%KceXJ+ny zk3*XzCa5lfK{n0@6dR(hV|``k5m>wP11#Mui*3P^qL6i6&OBT6o~Fe>z+A0nM9S?& zZWMbHKWZ^S`#R?hSDNqEPX$%GD(YH(lGC!(O8K?A!RJh9XPXh()!8_y|}-2;^g zW75qP*O|XrV>`$z{~WR!`x{r!q?nj!a#rpwJZH>^$74pKMHHgt2c+@!f2ZFOs<@wN zK~5l!MEX!vs5mC9SiD_fC_;Ag0so%|kjP|2!})D$K=Ms~V{4X}0I~YefnI1sdsRe2 zE0HFs4O=6TW)ENDA>acm=nFE4++UMG#>D?#wb#AE zZ2$kf3hKjaSD4hJO4Ds>0JSRi|F=_ca2`Pd5V&3x$02M{%xF|{Q53rr9TcUG|M^#j zTkD!xuSJ8-8V4gop-=~wk3*A_Rz8-DLaf~OzmRBt*T4U7V;*r59XOR}#Yk>) zYF-AgGTYmU6F!{{a<0Hwdb0xm9SQ)34A+z7`fv}|`^)*{;y+3M*H6y?@(h4jr;J)8 z+qjvUy7I=ARzOQ5Bz|s2Z0DPbe(L`RRtP+Moy8$5y%C=Q?KMSURsnHrFD+K5YFq>r z7C2c~CrwkQGnhlQy9nfr^73z97$ACn^F=c#Jit;1gRkfdOnas*#Zh;4cs`0k=4L@r zJ%gQk74$q|(c}N^TDzeEfTfcf8&r)i>gKH@WVkC9!8g`%$Q+}UWeY-K`ENr99v}yW zk78I#6q-$polV-K)?oKz?umc>%84AvaALiAMfl&gJXA;8aA>q|UDW0{1xx6iz@9`- zFu^5Z>{mECL{UcMD4nV~ihXAPZ4Wok{XozziYcS6rnU=F6NGx@l~LoqVI6X+g}X4; zP3N2_>IrWDWe^|x=HHbKKqzw$e?EYtjArZw)Fz>kk_YtktuNB-^c$GaI15cZjes*E&Wt~@hH4I+h7kdnbh zBt|HP`M(AO9SAlw@^Grd0$xGxu;AaWOdTi*y$AH0;f7Njz|IEwe+A}$+EPj(s)%Cd z_Yb@b6VW2paZMXW_nPmz%2P&Ch`|3(|HA?h-WUX8QU`0IJECFHE>#T_QKcUWYhj@> zmzTd&95ayl{Pe#9BlsJNL3AZ>R6!S-n`3WkZfvPhz6;RX$M+ z{cqg5SZBvi5#!(k1_nS?`EA!`5JS8;i=KQT^I9W658roc)an2E&K*2}Z=^Pfkvu|f zr!ywC29LF^PYhMwj4eXd%gE2q&yq;x9#`(4tTiy20pGqko+f|u{rWLb4$ZLCLpA7& 
z+FYC=g(d~&!~e8;M;2R!_#Q_B3x%j41d}4sD3j6m|Iw`BK{;&Jk zW7U!8KmU2g`1i#D-v+_CJn{cr!s#Rb@4o)O5&Qr19smFR4db2LGfAjq#=)01EVTdL zmche61K&$XURj9UMz~8AkGYl;*}i=Dz}mBcV`&nLDSE{2)cP~QC>pg_2PK0{G*?<+=PTu9J`$L~Kn}(nZTJo9=7IIaf0X$|&aX%o#Jcs0b*fbwOA?zp z(T?;%XRBbFq;{@Xw){es}3Bl+DgqBWYfTX3Sf_CY}QprkZG2D4;fdu^do4J~mp1+N!`{P_Fei(=TtCf|Fr`I$+aim)9~n?}~W%ID$g z^}J?P7QIaS;YOOdix~Gvmw?>=8L4)|0&%z|YhzQ*6D!+&iz0PF8Ob3(mu)SXDU@Df z)mY-L;)-G+*Sw+Ai+cepHE!(v;TmKNT642Z+qcode~V?@rJ|(1GU+r`d9i$^3zmwD z{jlGE(EISSoH{e56jved1`xw-bv~)4XMmW#3B>Bit-1)e7bwo<7B}@=XnTBie{aPQ zhY2wqfC;b)zMNfYOZ&@k`62Mg5&)j(9)4Ne$+NBXp3LbJEaYL%PeGUz@84wXomkix zF<`uIAq7*YPF(>Y3wM*F&YC67u8;Y>O9yk+kMJ+<`uq#3DtMRPKBTdxm8FKuWT}r` zyXun*r4Lw5XAZaBaexYaBTS^f5UNU{#g!^&?H<+#n18Eq55J9B%B&=$sBb9WIjUS8 zbdUxo0Wzp=zJJLfd+(U4%h-mE1=A}sg+6~G%}n=xh$Uz+%|7Pf+7hbRrLAaD)yA4( zyl9D1MC0`H_2F#VsAhC7mpfV;kHE~}~WEn}=aXORp>JyFIp&pyR>=uJ7a(&zW3 zUsu^G9_}2}qFV<5HQz(T$aEX{_}ImYTzG1_aRSk@&C&Dz_GuChzaBKXDm4^n!5c4M zEh;Pay1HbYg00^Di2|&JXtq$ub?4$>tdCKwl90QD{lJ2h!KT<=;bs?8>ZF}t4!vxe zT@zu71C#bhT`UBUIkv{@U$P6Ce4-@6lV`beqx+ha88XZj)aYh+f>uC5OIM<_xZs|k z&0l{iM$}^3Q@F>t5|-I|JsAg)92p0a>)SQBNzxqH2Uewk)@Ydv&9^k$4e9U%sYUlCp!cNQBzoq>hUZJ~O! 
z@Vj^FYLUG|UK8-np;u!<1du;Xm#G)*g1SI8#%1VFw6`i=M3>;#ensQfVMAV!;&6H*4n7>=rskHM{Ltrk$ zfQdkIPO!jE6|`v>_V?5Xp_ap@Yq9?W?(GgZk$>pQ)iQ1fD;UKUTjDsIfL{caCZF-t zMy^tfE2VB9*^YuCPzi+n=Jx3uT9vG*j^8h^rcSS-FEC(}$MzPq zHT(Lzc$t05_!C<0CCE&2>VGvYVy?16Y_8CJaki@gSz$*tw|eR!t^lN^M?jz!@3X=2 z>1c=yIX$>%khp}tDpb>koczzm?X+pr*+RZY5OHYVq3?#t3jixzN<9ElHIMF@IxfWS z`o(EPdk-57=7O+B>{$I~*2z^324&dKpD5zt0P;I3gko#T*%gcj8K~vQHtuFDfwJ8% zCU3Dv$wq=a@i2DzK|lY^Wb5`z1%zZJA|gC5^lbCz=5|pU$6UDrq;>6+T}s5N(us-H z_vnZaoCxOAWng`V^(A~MB$b!r0;?y5iC%$|&T4paa0Pq6wXN&?akh5sMI>mbo+c!h zV__`b_=EKGybp!;l(AO0$x^xGiJG8O5YUa8FTN7{3(@vmiAn4+9riMg?VCj|F1H@ytFoAJ*Bupq3)X2724un0T?D@4 zzQz$1BwaBg^L3cfs@nq5QY%Zhs;i>s2~*6 z!q3vv&kp{RlVMBU%I*3%fO4HW%X{!WI5S~9>>6X1`qlx+zBg&VUu*H`(LD-C2cK_e z|1i|d)NS$I>d0M4s(Crqe~QKoa&LaGv%NgA=yKw*)^q6ot0(0U>Rul}QYQcWnC~2F zsMqYc8?psJ)0?oi7PNx@nZSHv{=EME)ITcR;sq40I8$fhEa_lr3D~LYI!Y47C8dAq z2FK6;EL!QDdx3^>po?>}2WLePOnER^Yzrr}=UrO4B+A69(vp@wH*XyakANCJYC6y4 zgp%$T6ipq?mb1ST(~nLHZHAz<>H4I~^Y6Wli;6r4XaDQfD*p06!x!kz@?OZNEwt$Hm|3(X z{cKSBV(t7BOG|GvceAlH-V3v~-jmi<_NLvVVHx(NvXgW-F#VTpjqmR+N#VX=X27BR zjJVuM37>a99SN2X$SPKojntpB42(#P!y1d}Zg}w^i8E~|sjRJS9zKRKgUVJtIm#{h zhP~wUNo6@-A#utx)1hD1aH&Bc-SNeRV-UdPbh5abyuV5|Sak}p!iLI5nG>jI$c^Q= z>XR9Bno_?7Zx40U^i^F1vzkCZH+A{t<%XmfwYBD`f-foBUSh-qqghoxz#dHcZx2gW}&%P)qd2w@(%UZu?dM5Fr9&84@h!`_2-qN*y6oF%tl=z&M=6`KsXSk&c(M0*gK+iJ#RdvsiS8bMJY7C!D ze$4U!zv}3sq_R9&eXbLZ{K!E=vnlbbGNy|Jpiv|=>kLP!iu+ec89X}Z(OlX`xmOT`DHDP{hT5$}<956w8=T^(5`m*zwR2-6D z_09Leda!rHfrsQ>Sl7mpjM~v>6tHflt`3`Cyf`3r;X17VI)yB4oi(gLbC9|W1G3Kg zf_LY76*~ve8wGSf=dJHKmcrhrBQTMVxkYer+SbM| zfsmK0-IvT9X#Zt81&TNS(kic8(fAg?jHaI;wjNZxaa$oy1;p->96)vvZ>QJ`+yH6K zmS-MU;79}aMf|hsop#~MyRd>oA_-;JmP)8xd|pWYOl)ftN6T-NjmFi$Xw2B9t6g)& z>e*QlxP*OGZ`-T(z}bsB(!E4!X&=iebKSPkZ3|i38GfmUW(9TnBsU%#siq|iIXVBw z<823lN+D(+GwZGv<>Y`-rN2=kY{%Svs^*vw%@%=A~`aQO-8Vh7>jrY)Ej_k$S z3nKPyhZ9ky<;x;%+Dbii&k#sQZYc7Y0u{AY9j8mNRONM92z=J+oKD4g5I(*LhfhCf#f9XZ57k+lxNx|YD5HFs*z=e9Uom!*d@cr$z zntI~n6TvR!M$yM#Vf=U4UNNV|1$emlX}m5Dp9mX$c0Wse!CK<^u|+=qf5)){Pufot 
zYho()UsF!-tSEHGcReWiX6_owlvEHo_;3_*ySfJ@Z)+aXwwK$1%2vBC5Ejv$spFxa z-^`+m8&8~g`~a_HRdX_5e%vqJM5fg6`ch&aYAKxL!&XJq9{^KRzA~$|7Er7Ir?ccfq6cZ!X%h2=FdKn91-O-sIP~MN=6{*{!p#2}*$ zF-T*6gtL@TM!$+0mNu7mWt;b09~;e(W;o#HJ^_Th7#dHUc`@AUZfZHBUOn5lJv*hs zSc8eOBMZD&$}?LoLZMbt^QQB(X$;Src&-qrcLj{5%o#;1JBElkxnrah9R*=A4Isk$tob5eRzw zn2ONi_8;Khfn&ZE{~wE*97(T&MH!q{Iff=qzLRD0#wnP$wz_#g?gv5lg<=mi-21Y> z<16Dnuj%{4m+~&z(t7c=C^R*&j|^K}l)}I2*O;A(#*FO7*)3lQ<=t*r$w;I~RaL`< zi|)aWu;2)!x*(P>UZGi{=FQzhV<_R|1S23X3Mhm@_p?2G0O2yHgHOxQzzYl}+3i5M zypQ;@Jw~Wh5yW>(D9M_;W^C{QKR(ws@y~s2!`sfcza}F|%>Y6U75g=V2+4_3)p#0u z&>nD9a14BTxw~Z-EcnP*@~8GO1+RChc_fcYRr;8N~#$uZe zAKT?;AD8{a@4nre&);PR`GnNSRabv+zbd@+RmF8rSG0DBPeme6CD}&5KEO%+&we*;QO0^Tso$x)c=XfY9mS;v&J%ii+2B z0aUL<;U@qZl%N9g5NEw!CKBfO@Kgv9A2J6gq+ED*C3&Q2)EG<{Lz`nAw=7(R;Cnkt z3?FN4;&i{$Z%k*vpqq4%SJdx+uN14$Z~n7Fzd_ha1K;qks&{Sky}gh}Nm=W?f%nBR zGDhG>=^y}JprM61j>4Kqg&3Zqr}`_YEr1n-0t~J5oZYTIPq5t7;E)VpLX4~1#LWf& zfaui{Q`FFgw1o18{^PoQ#Za-;dr1vQcOXI;f!%78!^3ATT)&$RhC$iI_g3ss zFMFHic{l@Yrl;OmI5tGh)nNaUw>5N;!aPnWpZ^|TE$v4KQK2dsPr-b?Aga!3A0B>p zbzdd%6X3eQHF=3|Ps>dHzrz!VQqM1L1np+!TTDzJXd(a>#lDN=l&hq)tvp!D_i283 zs<^?kZt3+Wag=$}{q3)O+(Nw`qP>_M&g%9dqJ|xYb2%sCJVve^^(LD? 
zHj`=#wPS-tksyzj-5FrVK9$J}%Eu`7(r~{nO<_7}j_AX+#p%TqHL275Zmfw(WJ+k< zVV0X%?v2a+j)zbrCmb^;n8#$`^5k-Yn9LK-VYlI*{L^g ziH&R)1bFz}&sP7At@>8AwC?n-#MwsGaIhF(F3)Ih)cjr_URAPM9`BdE{9X&$aGIB2 z3H#Uy_X=b{6^Xg%B5D+2l0l(?ZZMz$Qm)jEhb7IpHN>G%(#^v{8sR=RK3|^&;`pYI zLB6#;+tH9Xx*6DTap)!`Ln^&s*X$Tc0GQY_EW51EfSG z?nXjNxHN069JgOAP*jqKnHa(n0qtaehsz&xhkhJFzPl4V{r9_XzwsTpMN5R)ysS@8 zXi!|m>SySqmo<=FQ?7hiVSIZ^@9t z>d2s9{9Xgz;c&llZm!(`lDTgEg@KRTyDtkEA?Sf03VEYhQ&*m!&jLMYJU2!Wa`xdq zTk-zUJb8BfKRZ(g9_AjsSM%|pyU4qp!4;7#9Qvg{fe6P$;1=Hda4aLC%&$l+K@|t^ zTp8W3F;ROm1$>dH<9@_sX;l#_s!c%f@mNH~qcO;if>Bu8KYr_v-T<9HOhPwdOM-fe z9C6i=0+i9kIgQOHk(;7Tpe0*2$#A8wx;l9{y7;f*jTs4|4{z#U2WK)NyMlXir0FD3 zK1&jaS*H}F$cO43JEp-S0%tsI#2+bq_hJINrf}v%G05v=)WcyETsYEd*VE^QTDr`uNciz9H87ZtJlk8}*8h zJf4o^lp9%Jy-}Asv!ztEQ{0Ko^J;z0xAH8=(5(`w?Q_A@vnJv_d_HC>yR+YC2|SuR;q#qs;t>% zP3+xTwUctjNQg^8jQ5)dBOyK19d?vN)ZX}ErEJp`KU~}YIltU(nK8rLK=Y1cYaoA= zTy-a0sSMzAzZdkvMAjeEo3b6|Baf>o7*u3-z+NBW8Q$Qw*I1Br#;gi9tl32C?A_4w zS(cRu&99ma6Qo#{O~0`%fR^*yy46h)#q^N(POA#jBE7q=1l{S~TK1$(fnysSA#Vnxp=S~(V`$k#|%`9~01p-Lpw`kf|> z{}`BJ=d4wBXUgHSY$OhRkiyhB^RH&*ZBBpNmi)!j_<8L~68w977pyv|ra!eb(LAz~ zqTrgtfBdel3oDj(E+J2o3Ehhkf30{^zx-=TQbE&i?B*+nBXRq=xY6tHjf*o5JcLn< zAY?N6kXlA7wJ2zWQkU&PgEnsTs?hY zJansN)8)+B4{l6zt6PD{(U4=#?~GI(XB_!OmHzxh4ul!$;pv+`?3cgW?j1EK@!~^e zR+i$rg~2TF*$-P&8wM!+N6|_j->A~ok;T-^g4H3oOSZ5aI)?<<$O(q=-`0ed^bL#6 z)3J1nP;R8PDVo`N$J%X4P7G=j)y^$A;HU zR7lxHUEsLJ;19Y*e9nqi%FqzUe`P4_b7?0tV~4&58K#Yb|FQ+Y5BagJyn_47uBjQx zyg_yBDMAhjPOOPfg2#2Qg6yv{KHKxwK>_Mv*=LXX+H1Vr%w!g(oEd*TC$p&il{tm38#v0Pi%G5l(Z1s zc2|))SQZ}faEB<**hW#OxE&eHHAx{&7GDbcIvWB+RM|~@X2D=+0jrFvs90RlHtXns zVY=YDP1b89&2@+AxnZ0Cxy9GfZFjwC-eLCy#-}i&4tC+10ZP0Q#slY( z*l8tI$4>K~Okq^upSYHdq~B?$A}I#XHxL-v@W~X0Zv~i~t>NVX; z2Q^sUdygjmyF{dYIHC{8-tHYCUPlWo$B~~RdFO2{mL4!Z{)Clts78xdC?ASh6e&aJ zD?$6Vx;f7CKF_9|E~YyHk2>T`)yvF{^p_E6%(Y)+u6nbx-N2mT^*T2}1^P;ToueQ* zm^!3@eh8JTira}k$uM95hWK%j1<4pF=*4GE>EiI*G>9NYXCrpGq!x^z8-6rMQ=T^A zLx7cwUVOtec4(3ihZln*U#ZEP=v}%A*^e>f1=qW(icw;9?c_^5Yp~VSjkp)-7>ga3 
z8a+Wpq=|_A*mwoF4BdI?qtJ-yhnlnw(0nu8W8Z<0<{)Zf3vw$y_N)BG(;Ba`DMW<@ zJgduLJ|;#7%YdufvBP_@BAoZ~G*M`e6XH&0*F-;pA0@I`kfIxot}|Pm8^?EN1`6~R z(Esq!6D9NzGKNtKzuZ<`p`mXvX!)xzAs1pWG~Sn-hLXyUesoL~F@?((r%#{d{DR3L zZ|)#l(5T6hMuB1*U+t+PQuSp%d-NJZ;e#^A3Zah`@NO4T1AHbh(LHqE!Ec;zcD~#a z2*dErYvmE3<%QrHXx^Ux-9lL6g#P2fMW1m({M?osAEGjkcj-+F4g$y=E1d=4vLKv( z09+r~X#sln^JDJV`E|<9KM5*_Yl9}ZP+qw;ysGAy2K8adG_|O76SfQm?-i!#iQ}-? zZ`oNhAY>&aJ&o{6%m`%m$XDY--0(;ZIlW_FV5T3jV^s#56_8;AOF2Go+j%(D_UN`iS@eZ8A~hITS*G3yVR1c1BWjZ zX^JROF?AN!U^U!00T0j9s3qNk4w+4{e3n$BjKD?6mf>3e(=O!OskF1d4Z3J$xxW2` zP{0j$(Pg9T!iAtuh&aZs$5)SFWQf@QX)m%&S~_~CMf)I?ZzLr1tq_Zp(!yGEOvKAz?%Xz{1Q0ZvlJ>FJNcDmyIBg{?d~$WS{$UFGN^+?{PXawYqkp|x5@ z@&BGOZhntoLynK%3XK9te%UCF0{+sN|FWA)vc-xeQDe&t-&QU^JxJ^t7bs{sH)sJsi*~(tmv)BPB7Jafc;c7Q&;R-+<3{QpbB6RME%9v z_b?|nQmj5ci3Oq0D{J0Znc!tj4|bIDbe&huh>EMHhivJcHjpx^WHw2BTXATV2{Dd! zQ^yf6=%@xTcchasU#$Q9!pug=JbVE*RaEBW==rm^Zu%EWS#RZKeZqWjwA$N9GghOEbYVD1CTA2fqoajdU$5?IH`e-OT5dw-=B@__?&rL`x752 zV*Uq(a+U0S={Iw$0MF8cd_p^GJKOS~PM+LEQKZ@pyEXN-ZYJtY4c`+mE5;22@8gW# zTm}5+@tD(59o!W-n`~=fG<99y6esh{V*P!NM(0)@f=MuQ?N*6q_Y@}HrMH?dZMbz* zHNYPqSmQ$c&T!Tt1lw|2Xoz2!((1FlrF|>eCxI^e6y~S1pB8G?+Ron}6v~$n zGLAcpA`+oR5og^^a8Y0MFa8F3G_M^{~_2;2Hz&ky)uFlR*o*Y2^G?3 z8Kse#VG72KJ$VR-%4Qwz|BeXuAZ`Ec7v;^pYSTZvb8nEJ=g=2DybQhwy+0+Z*1 zG?~!NX)h1VYRK5Z!+VxiB}{}awnbtfvXZD2tvuEjk;4ZsxX$HH{F98OT7VH&6s2iY z@rwP0qoZ+0P1%f?`kKOlFEtvD*J@aTy~^SFqZwIZ4Lu_f{~t1=@-kMrIbHl?pA~%d zIh)j5+7MJIIK(+$9Xy>A+j50lH=UqOCre3nYiI7vHUVBZvytmA=rGNBvJtn^!lq4O zS7>z({b`EWb>3B+OE33^^h_7)iI*AVQVDYlovVE=M>$-Q+szTM9@oTLd2@~Y?LBCt zfrrDM=+X!E!3}H-*5K#wZ&(Yzn2^qET}cZLzPoM9iDgZiv>HjNv9X`HgBA4TPXq&E z`x>i%n>lqamtQ}|ZM^K~%?dU8>_%POCus`i_9%&SxRFWux!hc@kGjknjS*mCVG&@F zxv;HHVZgt>JKnG2tvrqP{q_Y)u<|5ATkXqNz@_(h4^Owh3tASWDG}t;b4EabjxGw` z2W+VAQvTQdsr?w+L@P$TeD=R;Z%a$b4HRnuk7i~jqlW00zH5RzLh&0XSKb0i!0K9b z5-&$PGLVJNN!4@53Wli<)mM)0hKA~%Zdg-(R-R<3(rTcI%x%PSr4XO54EEqkk!gEh z_x$G1)6H$poz5K8o`sz&pI0I-v2PIjbu0V^@l&RHOuGugK-1__`*+1TcJYOAyv+n< 
zqwafT$ENz$G%BB*Gq&MpSp>wE_H``|U86X#q;GmDVb)s+R=5pBL^0O4u zylWehh=~-|EP&VjXNEAj{4XI}TCbCfkBK$V@OZ~iL<=)IU^_-SCPtH>HE(`wWXUZ? zLMsE9G+BxDy9ESNapj+Fenq`IM2B`DT802tHXLaxN{ko1$&QAG(d#58M#CPiYk(9) zRR7HA@y=4asQ~=C0I!xZKr8N-9;&XdzrA!X7r&tUj|zvF*3Uaq6dv!BZ{W$yPybyd zZ{xlhoy8M3B0Jk?%FhkJn2<{wSu+So^##+YUBmZH&`SJJ;fR^H@sFZnt zljeLDWKtwkHpS2_D|hWHcxp5*wNy%#qoGQf_}MEjid*4<$$QJ9o zd6S<`uif)kyxXck%XLqF?5RoOr>>pHqhqu((L&u`dwqu)xL#fFNBhA;jNW@pkxSmv zwX}3kTO;JEH!(M|W7A?P(|1s{Wt|nIn;wSkT&y2%#kt|Gtx_q`UUWtEO{lK_dt=lV`eBN{_uf!Z6t3OZxC>^=l%AiWbVfogdmTc&QRpl{KDr!@z- zO!{wN!dQ1PsDPg(fcai*;V$^TWi%4ccD-bXWG=Iniq&LlZ->{naxg%AqUC-9C^n-on5bTj+If#EVZTR#8{arVW3?;L_VFcYiUoPHlNd~^()6j2fD!WEp z2T27T7CAEJpPumxEJEXwbr30*2oiVjxLE=+y+J2dyibQKvO?ZBX5P|$BcTF-&^@Kw z^n8DHRTcI>OVPbGdpXQu2;@!e8S@6hb_(?P<{d>%Y34Uema-pD`GS0Wiz{;~U`!T` zlaE<-wEt>xj}J|P+OltcdCVe}g2E`w(2!Wu)Qf(cp@u&%3&coQLC4ewfD8f_*-k&= zeC1+V_x_=^x5RlzXH!|}ccY|8mfVMal2VT6Y{b8~go^$H@9t%FO4K%aC;5+q;Ri|j z+WJD8{UZ#n351eFj)E@*QO}DN!6r)M_&C%12Jzedsga|ns{f!7QwttFbPKk;Uf0{^ z^lgN;fm*-zL=7|MC=wEGu|+|HUNtX_8a(Gn3y&TY>Fv{?ZIs%xo1tf*ZxsodgtmmT zqX3?e=bi)wQDk8Pos@;fyM+`?c~@QUIPY+CYAG_A6tFo;S2mYr^T9{<)0-HWMPLI* z;f-jSt$5xHu=KmEU!Y({vIrS2Uq zOo{Q&^l5BoyTB)Aq7ZfYm1_&5ayjj^ZUxKU9i&0bdNUPetVg_%d=xX~6bj|prr1gW zN8E&C-u{qT#+S7kvYvQ!%qZ%w^;)b&N}`;lFr?%##%hN*sb3x;f<9Fhc(IlMqbX!L zwW-32zql89qlKlY^ajluGch4^+qXQ;8F6D%r*!0^LJ0E~q0e@Id+4^YSf03c;5p-j z6DCWQK!@W$A5H4_qHV-NqE3=-Z25z_fJ^ET+1*gyd*UXjivn8&&Zo3%umi{}utjG* ze5O=RptO$v)Pg%}oJ<;qD?T?7wytkqb0me80(XwxFiER7{bU#B-sbMo1G zQWBX|ja`1ht&!+UGDu;1T#j~w8+jPp3f%6@-R4^Km1@5ddDc%A*>u!@*@;r4N?xFU z`9&pN805*s42Pw@;?h&y@16n%WqVfC@h(ePM^(7-9skRE;jddVZyW= z=8Td{BOyw$!e_dX+VICPVD%SfUlx-Aq}lXe5n=@@9H&$_|L~a9Y(?Zx+L6FE-WAfoko6Z>oxYy=+Wn?J=X?*axYqmXugGOJ=69cH z*|oxs9ww%|qJ!6weZp#uk`*4b6j7{$_LJ*WnkJcIYoAl> z@XX^X!0xAF<7smG&!}vChO@verU}8wXM+cd|N4!_WdGEyCA5v%&{J9DY2$_1Q)vD*2?DIQSe1)x(Q@lW2b6ouQ84YWq=R?A^O5|`hS(kowI-oHDR39 zQoCqQ*Xj|-;Pl=WlyJmLLUHi2wj(ne3l$KDcAgJlyE`J z5lbqv_%RK!$GQ7oadR_4;q=zV>EjPO-J`UU{`W)&p58YxivRDMA_ty|fe*I_j 
zd{yp+W&Xdbx18RAn>@Wt@pF|24=lEK&Ev26flnvhiT5`Je~u@+HpT*(g8$fn2LG!s zr+e`L$ecNp7a~~EM|e6`k6@M|1eO3%z`Fs*tv>^sB?PlRS3v*ci-ExS+=NA#g(sz` z(^LgpvR#Z3!UstwB`#g@5Y)$|27F_8bL*?Lw|=0RfyrgAqxa<9#>(!0cxUaf2PCq* zu?P@0MF+L1o46JP&u3#@;15g|g7~o%B$-xx9YjpZO^i%adGA|qW4q5OO-pzA6#>V< zV?UNf8q1b;6jP~*)v8NmBCVn9Ve+u17mC;-3J^j-1Owp_uvar zQl2YmMku&_*d{vrJd`1yhQ#k6dT7ILk#T6qi64o>7X)zh4QGXOBNR4!MbvrNR)vG@ zBBr#~Gs30VSdw&bLYNP1KG;MT9q8hPM6fVLsxE?$MHls^HhmWe8@gL+BpZbg4(a9i z_qk7AxeD1(E>98}rQj%K1IZgVf1c46k&gPemm40rF0Vm11w=cI1Y zq^x%PwA#QU5wMflIerqw`*zXPzGjHh@CWdf3CVUpTySbC;4&g8{Nr`nQnn9$b&oV4 zUL>Q&_ig>-NpVA#=IG+!_kL(`Qch>ciYz5Dq=4&4S%m1D+?d>&>ov-k!&# z_=B0>)7znOO+B>1%;IF2;JUkS_7b?C30o{#op7T_JrfhZZFW$GN5vT}^{ll)^` zZS~Mm^s)@{si}kF;)1mjQb5dXZN01v-9_(8D99gSX*f6>>ghNy&X`tQf=>k^b+w9ik zO=H<{72N@LdLHaoJahD*Jte^DAIv|i$&tJ(AG>j;BW@OU9*lW1Wc1bIg+7cqTWE-} z^<&nSv4w$svZ3g#q68i!Yxy zYv|V&(lAJNtZD@-D$uIW?c<59Cl?wjX>pc@7U1#n_Nuf)1`+Yf4M}9zZE;p+zi)B( z_XdhNmix_oVOY-b1<%}q`sPDrWty^DSvM+#8!N%n*&yA3B2^;reT={C-&freIVoKb z81gTe-!E->&bsaw71h>JXC90$J@eCH%lm{#bNSMdoee%Tt)fcGsAGwX^m?WE#U+o; zv6!5+=bVg;bP9;+r1$DI?X7$JtTg041TgFO2);1e!>ax)wOpEjE!o_^me;jx{@FGG zxAY;y#kXYOzaLxgj`F;kX||AZ;psHXu>M+jbf8ux6LVZOiXB=Nc=N_!L$-)=er4UJmuaz6rEwm(5~d1F50$$5YPk~>l0 z47w85=t670WSxb`wL5ap{0EK3uIuO6>|QMS&m3=k!$o4Wq1C+>v$)6@atG|iC24D@ zlD7IBL7PLNzhnB8fO$a>O>-Ka>~lmHXIL<=(+lZtLmm-yslS;^g7*UhNB z`9MpQErf^k!jx^=TFZP42t!yzIQ=4yU)LbqOLi58U6F78qe9ii{c&qxB(r2QtHwGl zSMoPiQB*J()p%acy7(?HfSWJo@zPSMrbbN^e*h*nO7Zjbw9Txncil{$fVt-%PHr~- zPEZhFbJ^HpmERqF&nsCMf|&r~d=ZaKmR>g<4fVfzs;R?fKLH&qua}6z00>S2ju>>m zheab?6@Pxm0HEZPvY(EEK-)+8WM3qe~;`wdMfpr^37uBzU_!8x8 zrByftGDpzmKf$~brGFO9kg-e)EtpWeToUC7Yg=PXA23e-)l?Z#SnWThuQ#~iw-n4QSOcqbFEk^bQ352UypCIyxr$JA$>$D6RimR@y ziaX7SgOfU3DE93qUQ{V24KS*=+WNF^tN~crktbJ7A_18{wc}#dL4+)MQ}Z{gePHVn zrxP}IXXI}dk1HEYYIj<%_9$Rh`bY1D2QP;{+HaF1H#mEB?jV*NpqDXzHU>hi5+9Bt;<`qYB)vRHLBs%82f zz5YvYIVm<^wkSE| z;7vN{%_5Z&Ix}|Rz@0BQ35YJEKEZ5Cl{(pqpDCaAEAG-!3Rhi?p|&(htk1Y{FBdzE 
zxv0`weC?i$5=-g$3Sda4jinN8k|jRU*qqnO-g2gr@q%oY;MpI`^6koo+vj{#UNwN6 zB3g-cUBc~bdkr357e7}aP}~@dqK@CMnB^4{oFA1l(7ing`D$Ie7hM?7<+mO>x}%4D|k4e0cqg#>yHtzMAHF7CW*(hobLGVE7&q5g|ecp zx3#G&TCDu@_FbDkXQm2gcU>`+YSM~00;Cc+p1v_g)?6G`9I}=os`CB~J8tLS2KH1$ z1LA9#o9NF@cS(861bNOiL2ah3NqF_IkqG^}ulBOD9|A3!9xqxj-~6Dv-rAOS*Z3&}x7E-BzF`o4uGKmGC0HOPslXP9-}MN@we;?a9!(Wyp4F@k=F=u z16UEmPv=Du65XDE896%Y-ZUrvKB!&o^%!IM1Sk=ZzoYi;l%TTs+2Jfn{VPGDYe<3M?b|xR1Pm=>6f0*5}D}zV6HZof1P>O67>_I~tKy`9wMG(4uiV zshA_7pj%}RJ_OL@)sFJ<>Z!m@qFx3)Gb*=Z%{Z{Ar?bA2Z1ZaI%!NKr4>b~u9$lK* zU`O|RG%`E271{r2yjy;o9($0SA~-v-v!TCig9xklaxwFHo{d?!;mdm9%x@nNhs-D7 zUQ{;JJapwd<;?vfMoaiz%u#@-NAMa@h+^1^yIqyT!?~!VnmdnWbt*^@H7L~XM42XM z&lqCvPkvlUyDlX74c$yzQ2?Tpd4Kd8HwJ^g^o-A_7FTxdp7!?X7R7@_5MVd>T@;8&?S+Tu%_Y5%rpk(rNucZS zNvP!xA4wQqfn9fD;}hyAkW?)x;Yo6kHTnr3h6Y;0x%#dh8p18t!t>%GlWpqXgOxWU zK9SMdUka_X>cFf$9o!Z9jZ7j6y36=|zbNGM-)y%~ZTjCwX9}a*pU@Ntc>K%hGF8$= zZ6W%}@*ku$U%<{fxJe^^^Gw$-g8aS>B^_Xx39R|L600mG`J5-uGloHqP=<-n)(|iZnP3%rA>OBy^vX^x>67@~hgCV#adku{*nK z{_Gm_d1v;I1$6AQ!1-u@e@ApfWl|D205@u>U5Va6TjlSP8k6iclh7M&rt;?VI3myA zrTa2Rf^>24oWbYX?I#7g*NK#&zd0^67E6S|o`?2m_#fh2-en!t<~qDCE>3M_y+t)u zbpti4))B47)c>Qn13^mCzb&x@%-=g1s zlIIc{BCi!{Smxq%-0aJ22yuEf)XGjbjXVu1ans7G1aHi$>B>X4@$gzOP-}8$YB$$X zNdE=auM~^+OzqWu;#)@02Ja4qjv46Wcm_m4nac?epLif5u(dsfjptY}ekQRkFn@*}L{)a9#uDt%N{q84}JAN93Z+NsYb-2&7E%(b&LF;HSg-VDfUC`)@2tOSVt zXo>_Qz+-UB`K^XL*2;rvxAFD9FL-J%K^()RjlcjpE5gnKfxX0aOhlesFut+(Oy?VA z22cqhxDxx>PcWX1AZCi82BMGoQ$II0nD<_U@&znK5DHaN<*;oOm8Pd-M2FovnrWy6 zb`fL#52Kj8NI-!^c7@E$jvf{5^^iYa(+C?vR`8zy1p&>Ez!Svs@;?_Z&&#=BVGP(U znq}Xe7o091L=y=tf!Jw`eBs5j0l&Q8pI=kM)f=19j3jwigt$4pJn9@>9Gt!F8BXFw znX}~bJ3U^84tJC&(m+IF)~4=*Jw7M5)1mOC*f zlP>{h7?k3qy+3?hk7@Cu;GS4jzg&(-MT|BKWsUj{#3!y z2#^P|-z}FWtH!=I5ss!L+Tzt@Ee4L|`>)R*M;Bm0V7$NUj{`|ZBI3LZa0>UN<9&lD z+s%9ecY_~zU#hLyC($r93 z!3BHG-$-d)+mTEqp8F zTv&MD^R;x-A1fn5VN+nFe>V*LIEhmf`&Mq_se7Y1nS=2ApN$jp2Z9NkM@Dqv?S0}q zSW7*9RbiOl1LkgNy)W$ug0h6G`2%)iI?yiv2+uPieSiN3GpzYG5=^zJR7R6VL1h_4XJ 
zY4uShDew$+K7~$vcSeXR%po`h(+2oF-|kmm$y!EIRIGC2`5z>-LNzsc?Z4Ntc>Q`d z0B~Z3x9`mrbdp~X9DRUGz|fxyOTL%{qJ%^Jf`~7Sxx>wqf*?-E7-xt`_;Tj#g`;FJ zl8;YgAQ4`X3K1Hr|2KH227&MbJDeV-VRniB1^99{XmpoJ?9pER62A$sf!usG$2EUd zuIlRS_B?NHm2K$3@m#oZV}ERKt2YX#6p(?XjbOtLjro4!K(3~?E))LvEZKIKmjLbH z53j(5sCDR%$0e>|(8EWZLX(xGz+EVh^LV@_$y1Keeh*NyrNv~JS z($qe2qm=*|yqI(~IVg~j`wM*0s$`>Icy-t+pO4*pNMOQP^M7N+P=6e^9)whh0L}SU z!7O|$EfLJdNk#+;5v{0S;6bVYb{uK894BJChv>CeUKqk2Ko^B+5y#sV%k`!{8lpQ9&uG=liPK_cDbhXisj#g=ay!?T zqXrww6Uw{C^lkp$&^OT6A49T+U|j3a)KGIOuG*1O{OvfYKm6rS!+NH=7r4pAaa`l` zG)5S1xXU);e%>po`ZG=uNP2>+`B_C}Grj)Oj*|VxE{e3xK+e~~*xBM87b`3`H0w9V zQfii?u&-C%!d*`Z=Gd5wAUBUydP#f_ZdQg*v{{tSsQki8)(P4_k}7ziXAdA2lCDJo zgkG0>Sz);mxY3gG{9R^f7{r;W)X{*g5v9N$LSpjAMWr^~o%1|B$QekW-6Mc58fm9k zs?z7AHmxp=9`pJR)${O{9C<<~`?25X*tGU`P&#XH<0T;3$#KwxIpxG{=Lg4mvjAPaU^u*b!erOo46 z=^^m7k+`NhoJCX|@?&*-lzPK}C^t+0BGZhJmtSA!X|Pt1Z+hWve(S4b2Onls)6~fj zLI97O>EZi;75&f($-f-=R|kyy$F-O*UZUvTX-P`DMhJycyv|@YJy6!E8xOPuN0r_O zkM?(Nk>Y6oa$s`_)5*&2jO*jE=FRjwuN9oNGSV~JT0O%&MK@eg91TPva zvXnLg4{0nEbK3wVe{mknKVW5t9IyS{%OD_G4vf7qH}`D2Mf{B=ypT*-FBim9EU02CJ~T&$^XdWSDbbq`am9%aon-O--3q z*W%r+=0P~wJUTtU$yy3#>5cj`y73P6P#uN%K;qI~YRB>QXi}lPd&?pfPlI-{umva} zvX*fr5-yAM@y~dESi+eLfhna8YgI;_dJeszv)KSWpm{-Ewve5dr4dgL8j$Ip6wDH+ zPn??rUMX6bm!{gKu?{iL!ca&cai&%L@P;l|x}W85)#Y}gR8N|I9fyfk z0HZC@+~N7p4gGw166J7b*5t;+W6#eZ*8#7KM()-lxBW8<3l~aOoGga7;=%j+^iiM+F?pz26b#-&;6tOL|T3n9*dJ95dB%>P^ zmmYHEv9sr_FU-G~G2_6hgcc5W#~p3l5U6~vqH=u(c|w#pWaTP?^pZa?FlNd~Z35w7 zsj>|M(hwVPE^5uewzk%xw)XCY-ysqO+DwWa#ywXGP$Xa9$5^Hh;l+?wob}y&qxG&7Svt_vMwrU;NgNKl}7*6eI7K`*nw?OyJL>% zj%)lUCg@Y!@v?c>rbsIX(mXe^Q4?usD$vt2hRPk) zdja+j0S7}a-=hY=0omYyu6)CGX)cZ=Rz+E(f~n*YE3iiQ?TlgE<$&y@8^V<0n`rRV z&W(@g7BG}MQ~AC}SxxDPnQ#)IkWw)?z?r-@+sJ&H}?3K7TgPKJDY3v!4tpzqisx1*N6v4 z;r}Qpwq{I_;|9A+>*$2&KNhmLEcn9)nzb;s@@l(;4{th1#G=Gl(_}sY=_cqgXS{QS>7nw4k$}pB zGW;DHQDuv*vMQ|sg1^yP6iLJ~W0Pbot0jtj^@f3}9R4d+f92}UvdQK#L>(fNMG`c= zR6$-mkE^SzbD(r*v5nIHu0GBxwbgxXKrdyjtT{o=K5Saq9!b#-pg&1oshGdFX-@6# 
zm0{S>>~P_{{<7=&g$;r^M6mTUB$jLm=1~enLu$_WwS25f3&H~C2$ngY1almI6s5t= zdjvX_$`QDcxxJtMI!8(ANN~<4?Iu2rtq}^gA*zYvS16hB#gSX)hFQK*Ew9NZ;$GZX zf2MD`uexBQTRCB739Fmh3qcoCtuW(d5H^FO)G{A{Fn8ktL{rUJoG_^-S5_WXtiP#d zKA9&M-{H)cR8Y3n8_lXz_?jlem$!sNeZOvWbi3}qoB5C8UmMdc&}&pC zYjJU6*lHM>$>T%2(rwKUrlcOFj}68`gaY5bQ;hxn<6An#=o@?mU0|`xR^pL;+cz@M zoLT}-xY;o1>?VjR{Suy`FOTZk^09k*beCg5cbJCWxPsBrFU? z=G(|xoRtjb@O30(HpY;h>tU(7IBsxG-V`Dwn+73kTde|Tr{dD15F4t;i~a9XV+0oJ zgcV_nr+vR!xCDVR!sLQpddu~nCM{c&ayzSz<6e4k=-KjJg0D`8lXXB@yGC52mNb?E zEC&X8io%%g*hXCb8qX!o~|E1YP!H-UBy2e3jZ9u>~@aPt`S9b zyMvhV^?rUr5J^(*T*;n7z6JN&V0kzeQh5Cx;SwvVk~<<-KkLH&vfG>gpBu{(V|(T z5F%A?ey_QIP(-baeg+R?hRX8Eq*wLkVwE*CbqJg~MXw(5{ROI-G}H0=N0Z3Z7i6G< z^MCs4EFA6m4#?7x(qS0+Uw&O9yvXIQ1|Ca-o16U175=s_{c$h_MFnZByqs(}(SrpQ zTXA1_PQfu^t}%3L6DV=b>H|fl!>I21pjta1?Io;fqNFMtx@lN_s);WW6ok*K^WgGq`ND2~$?PPx~0IS<`6PVLEZCiG7o2Pz_*6i?pjzJSpK??&W7PJ+vndMjFW#LQVn$foxul!;6* z@GnO*S~bkwSj>t}(lU1a%o0Q+R6gL4OhF0)R=D9IrYWL9NkDQmvn*EA7OYW03WEm1 zx&Rjz`>ziLfQG0Uw^qZiGQ&FT24LVOof4tI0S?a2W~|A;+x~@qyh)WVV6ZQaDgoR#A7S`>KNav+A(Lia3O&{&Qz8Y9_uxRVDCctD*=Y z7%aq%Rh%M~UM>v?HipOiJ}4XW|9xh7k|-u_Trp@ln&u0U3YBy;(+?|ZmXDPAI>{X_ zJmM@oi>G7#zV5)2^gtLUOEmJWudkguw|la2&64ZH%;DFm{ezyKJd(r*X?z!{R z0x=3IjvOW&I^Y*A;$xMsY(s7hMA3MI{YtS&{)5tmg6L7v*7Va8rKXW3+{MzloUs!2%`2boWfL=V2Oj#Tu9~Sx&tKv0 z>Z#c!V(fVoE&BQf(F+cVxv*)lp;@$LZf|i2Wcz8OYs*Hf8}PV`S}fjY?pberECKXs zW}F~eCUF@9Ga4xKzMx84Fg?(Hn>lxjxd@A0Lql$g;YaKxq*fZ($`9E{g)7LKT-#aP zoUDXxNKG9QSTsW@J}Bia9ksI3dbwOccQ(RFj@mn+SX^377TPhU;6Y?33b0*xqKLZ6LQ<` zL#mk>|M-I;8WP-V_|vzGAFIRbVk<+b0y_5+^^db}Nd z^;f&fu9P~KbVv$r8lv6G(>D>Tj5KQg-$d`<%mT12XDYe%v1&z8` zlhiddM4M$o3E$nG>+6H==2HH+UnkX5lkdR$@OifU)l5wtpI{W0r!1xMZT9C|VK5mG z^#;{8sUZN(!0~S84iiVB50kj+$Tt$Eijq|K4{jbDTAZ4y%3!c`PKBr*Tb#3<`!3Eid}bISJi$gy$Fw>$hH<*K)_cNg09>l0u3|j!3rB;GGP*L&;w) z?dBIqZS8Q!md(Jw$Qv=AJkrJ8BF^JqonE}WT)HG?)N_SlF+B;1#Ta#u^xoN#?*}-3hSp!3T}|xY^%q3pO{9Zm@^CCj8YU zh%07AnWv>4=fG(%`85~WnJhJcnH>pSMvU8}*ngWmnz%l;yJv$6HUix5o*68iRIHo~ 
zin8e=HFa%m`Lgjt0vz4UzW}4j3zA3DTK1PmS=ZQ$u3(%#AsZ1y4PjQe^>Dxfgp{5qBu2_JOY)f--=>B zZjmxw7{E2u2rN6mt`R!_=!+T<67;VwEZ!aYbLqlgLE|FcyRrVa&QkIux!`K5cHQON zmeSgorvadCUk(DB!xZJ~bvFazo&*rD8stC^q0r$|0#GxMQIg!R$`J_8rX5?lj)y z%A*9j??3)taG0VOml>0pA{6?oq-zX_XdGS0c9qQA3ps0>vFD z5-=jdM3j|2OAZy#J14>gQJ^SJmfpJAs}G{B^SE8zhl(v*T`W7FKavXAk4B5$kWFzh zE-Cw&Wm{OFD+$vu=$S2?{pA%D@V#;Vcru0mcpm4|l#$6tjt5a;W|gMFLxbE<`yiHB@W{75>|`zslPEi@{m$&NoBZYD+4n+^fx(}bwIxf+o4uX? z*Tl@Uk1se=Ec+pmLKz{k^5-I)UnKLo`XT%U0UWv@elCKfX}y??^6vBKb!ayqI1y?# z;%Ow#H^N%vy(@O9xU_Pcy6n3z|MBWJYtZFxedrSHS{F7~Ki#y}$70w-V-cZQd+!Ko zQzp6X7wwjLACap*MdF+CE4$ytaNo6(kq3>RdI6H`;t`D!4&4aPMBbM!}|JPVf)clG9HK~E-nvFt{WXyo84Ka?RxHn63uh2s!^3c zC(2*kV@z{=b?NoH-h@RD5A*be1ig1pw*(@@(P1hJfemKbyXTD<4@7v<+<0;N9Uqs! z3wB<7cs-lgvr0jgNwjnWvuxzavw`mSe_gydG~3{f?eO<6nDjpec|cVq74@YQ$?BnVgY-RiirSd@o1m6ct6#nJTeU{Xop?hF>jkjC=k z&ftlVP+&(VsHEcOkd!V_tw}Km9}OPt&jI%ajuuJT0qql9`Yhgl3iuyaB2#P*h89pd(2n4)8LJl;b3s;iSG??h5 znJ3EkZON&pCQOy_%z7pB^a6GWC*r{C|F+2-BSz@})0dqRGcM5M4Q$sHay{xMGW5PZ z9IOiOLG1?p6>OiAh;`}n{yYBzz-;kZ2b@3GLmUurHt$bx`B@ZPpKnBH5|;-D3Z|`f zY+Mi}e-G~b{PyOFYgfl5-l3xZfZqUQ2=S-K??(^oJb~Lm=8IremixE~Ai^9p$5^@& zcskI1Rat%O(Q@C^nvrI{0z-N@t%khm=e5_L3s3%e7B`*|CS}o_vf#J@Q@S6eS)Jjo zd9oEFkXP(!RbASfmz9&7$KH}y5lgNa9KJDpa#vo7Pk;72SYe-@9#JVXRu_w3MW=gg zL+WCSV|ENzp7d&Fb^7#+z1Ht{0!5s>LZHvty>BF%TWfO**Zhe!wCL`Q6+-{h-f(2P zV|BF{cvN>#jBN_`+1-2aUdFfixW2t=gn!Wet6?@$>z!o%w!afx75`juv*fM1MEbv; z1STY%N)h5~hTKVfq=a4>ij!&(8%C?#He>)v&k0(Xsq4*%TqG$`QC3y(*6`nP?B8vD71#yD`W=Go94iY;I@>qluLdg?9R*C)5|PxFLl*nNWSxW_ ziS%n1fkd*!s1IlKpCZtO6UFQtX#DQ_W-A+#s?hQ#8O80gL+tR_l8TSo2Lm53XUTni zjz>p1#EE3b*L*qoIr$VJ6Ic{}8n7~ObvoaSu)!$tXF|2r4Xn1Crxh0*S>x^`R`4Wj z48ipu4VK705Pg@QI`|T7p_D{riSxb{Z%A-*Qp3nJhh^=~>0TCS$W$s7Ux1BOIv4sVNUt@VG3`*UuJhSw2W?R?zK|1rs(}4y#`Ek{66FpU-(O< z0_<8j_CV|jcC_huFmtmht)mil@-cqeUXvV1);$Yet)5A9@BC<|1q<3RcQr{zGp8Bw z9hL$gkB={REV!(GVH`PP)b}}xX zM2r@f5izHvslNZ<-Dc~#{*69D(?N(ZM^{g@A;+*d3zE6M&Z{c$4?BJ9L-k7a@SBnf zk{^+~GskFE^VJ$F&j55WVi=%JzOQYvD5xTux4SN9rL{&IdG>iNJw1}j(q~ym6wQbXMb3d? 
z3g6Vg)A#+$B+Z?y2+@Fx z-8G&Sc6Dl!VF7k8Y8<_8?u`F!u^)fyo(>BISsSUs%aWY;DY91RJYHhwNj{2HwCd`y z=Vc;N(x-D7{}`GmzErxR2}AZ+G2(X~a-3-FQ@Ah9XQY4o>vanBd!os=ejsdVYWi^c zlPgsPTEXj3-sGn=Sg-S~=g>y!py-DBj6bs5v5q>zX`DH&!!jhz^hVmkb0LCE#_T0! znZ{7D!C^OziENUr`*$xSVIx}o+TmbefXtrz_YG#>$#-fPKmCf$gWe_Tx#`SSO=W2fn!SQH@Usz4Z+q>(jk=%(^OAv@k4~-*Hjkg z2yFVEC)|9N@bu`&hY{9gZa-)dc0xkZaH(dndP)bkrH!;RAF#8$*JmDyfC0T7p}4N} zBN?u+R~p)r4&($zD@v-+222HEnrF0ix~$~c8bjAI!9gqI6|J>)F5%li_Xt>}D=R!h zBi39h>(5ecM({=!StL7AYv@tA49%d*KWC*U=h!O-`B|Nu`ag!VLn9k4>VK5N`Wc8y zsVKU>m?nU5?~#~SJmf%?;Mh{iOr<2L(j4PY_FslPPdnU$yO^*gS4_(tzI+49=Z?wb9;ZmWR^iZD*(kzPQNp7{Xyhix4asVMZaiDCj%_Wx{sN%wF_+KB2 z(x3n_aMy1~9&0}g+SQI6q{W2pxVlV-6G8CqB3Uz-(h1PqIqZg*r3z39?ndbqGMqdK0@G8$n`lS+;eQ1J-Q5sF1q@%(2%)D zO~Zq_S^#@{=&yV-de%pjYYSOxkSXdyr>+~`pe)cid4Iiau8+U3;85mfJmfKt&LYh| z>B*?WbtgdcmykOwQ?jqqRlRVZ|4--t$JAR#MHQ|8!$S`s5+dCpEh!)^ozk633j-+K zjnX|dh?EQ^-3>!`H;90AHw^E&-{1AFcP-XB|1dCfX77ET=TpzievZYK(|%oY9|%G} z<79yNP56E7CR`omPwC|w9OnN6A0%izG)rG}tda8bc`e-iI&8M&cTfMkvE@AegjjpZ zg4rf5rbWOB}H4_Di}Odq&p(Y_ARb% ze=cJ^YWhqrFM>fGUL|nw_AOjf*;9hUQcHU)SvmnJ0P79IV);mQwWpE??|C3s@_Wuu zU2_)7#tF3Ya1zv(8?D)*Bj%@kc5;yePh-4$9pb;_>-!DLj!Omp`X+7&$(#QkCM}^p z0-_yjU`8bBVe}4%!O`KzgKy>;;~uP+Bg0joUp>dc3$N>Yt&bk;Ul6G7uJyC8nRqm` z%V~p&-H|HK@}H>Jo7H|(f<3iMedBmN4|;fR->j{swTt>Jy)gRs&w|7;qylSX z@$dW0m;OT~IN9D)N|5Ab-M*`xCk{jy{(nJ_7D_^*j?q>Yl%$DhWcNpIJdh;IcRuIp zD7-E(=mzenQBU7%O1ce}+{S9F*m-7nTKgKZ5>CTlJJqNW5k<3nmyai@2l(HN*55^ z!imHeKSK+y@Wr+FqNuxU{^?EBQ0D$N@5OIrlZ#k_e{U0=9PQUWRzb|!yYx~#S%^kM zGfOQ*;%DFSrMC#|2f?R(Tjw{~peUNCwNgEy+GG|Brq_#`o4jVzfkzeinfMK`#lL?l*7B`3`WpCXSZN~&IfwS8p(IOnW%6e|2VIv%W6!!nzT(nw`X*NB_ z_Yw}aCpBmETRUp?rBu1xVtkiVsKBFGI60Az@bI{Rcl$-+)%cy23(fEyu58K~?3F0` znB>gaE!LIqqikxLu6o7sJiF#NTKB!b6G|szl|+h_)5d#W7_G;1X=8J7m*?j_wZ829 zp475-CRxt?S}P@lP^J;6m%Df94aI+av9dI1cB{#HTcaxp)Scg09^K}Yxe$@fcVP3iIrin(@5EHuR3iu;|A0VJcxMU*0P z2%Hk8c;zxEkRzhBE96A2#qIXJ`JBl*#xR5-aX0A$!_TAm8E8}b_9ASyDcv7&5%i(V z8KFd?o1&XJKee!h~oukT{2X>B3niokEAtpiog7=xpf-0udjJwFz)hg!gqq 
zV=Pls!un0LxR{)2Pz9)PM98y@V&D;t{^!^a7Hp^xZH2#<0sW--WfdBz5hJ3y**4vF zy^5&sz{YdFGWrS%6z#h{05HKX+w0k?<;&7lNh~I4prF6R$5}@XjwV`08uYWfpehLCg}M3}CnC(sOSCtA~c2ImO<+L_N@H5NA5p^sOsSPrTh zCfSVD8kX{Q2O`$-bLL(|G95n`si-KI1TRdZb9zXl;TIG=KPrYX=cJmJma$jcm;B8Y zA#87HIXH-a#NGhI6XcS24H21=|2>yZ+!8NAykXq0IWa@?Caldj$G-M`va>VzWS5xX zV6`G_w!-&%&A$duAF1x})eFgx?QPEz5Hb8BnM zaxn9SB{t7XPi&;5Q$_L7EPe^R-(7vY@Ki{%_!uYq=omN62j-W|m{4JYFWfmS_&vve zvg%vn<7|@@W*G{Ti!ZwOadTBR8m71T-+{~BhcxR6Nsa1Ftm~jEw4jiiH_6<1(M;hg zJjrn$GkHFk(hzd4KCkpKHnxNh`V$^q@k=32^(Gr&ZN+c&vCePOGSkepq!onD8Ac!C zLs)X7<^vipp)?&gu3g)dl}XJ)?7VO1qR=YvYj1S|Y4BB_Rh(H)3_qhpy>i_P8yh76IwMi1YZ84D_dyNgV5w)$$d} zL*T_qykh|%ni^OOD!tF`>Lvz?RHD+*(@^_ES_qy69$`iC5juSh|6I=Un)~s9*&<)Y zCm0gj9p=Y}FAq0a^v?q9aC4|uRyNOzyYAWkRk)N@39-bMt#|9EK7tAIuL3Pust%SP zh+Bk1$GOTvx461Um?QR=K1O}eHpY@mXfh!?N%0VsS2tNO?q-IRRRFU*)~}bJAu|tn z_*nUU`A{67IFNLg%Yoy{@g})edn+j@)XBo$&F`a|Uo|?A1P_e*SX3qr?YLEio(%s! zoBn+UFMprp#pk*<`W)1e6-Ya%=O||fBn%d^o)2x@2h&gJAuHUtwk*e`e6It>fe|d% z=e6cG@s#f;@}7^qDty*0Nn$xekzcp6s%NUJFIG)Lm6bib@s7L%+*Zj7Ac}RbYu@HW#Oz=9@?Ma zlR*bv)V)c;cqD+*ActEt`TI;(9s0Zg2njZvT7hYRa{T&$C`g_;UAWC1AeEGV_hw1Q zbV*P1E$|+)Q=g+g-n-Sz>1xw_BcfxA&5mU+mo+^M3?e_;zw~met>@FyMQ<^QjI*J8 z`&L*$M6`%}8(mI-F~6*`jfb!X9Ec|mF5s|p`Y6;YMv>CDKN*oQJ_?lu&w8PpO5vGn zLH__gjERL?*D0&0#rzh&=~*>hU45#Hdm{Q0V6dQ+W;OY6)kyEQ+uU*&fdtrFls%GR zh#`$ED_yjy#IWmURjGx8?ZN_y5i0d!?bPwq(Iu`ZHC0M0RR-RUR0N*Hw}Htpibc9$ zCb^=Wn1em|=z77q5`NnfQd~Y_;uL@;Uu_cweDIKb7e#~i%)XIEw)jUepb;u@x(%+3B9w<0i5^s~3m!D*x)9d1YCU6j)`6Dm&c` zkuKYn`%#{Y8la@ie-Zms&Tyzh3!#K41O& zS1`eHZUuvRWn%IqajMTzoRlIXi-{i%4@uEC{VL^{s)+vso(B4}DOfS{{XlVE)H}IR z+N-aTbJjb=wAbdELqg*#p854mw)xc@pXMi4T3a016D>vF)bojm+UXNX8&x~KY;A7d z#9)fiN(y~8luxw3Zxf2Qx=a0VayAlD?&w0sJG%rBLN}eN6)W73d&f2ZigMU69|Ro8 zuJF~X84>G$!+GTu*jl#IwbD3(Qwwl4S}HPWwlAh?xWlUGZr@WhJ$jso``+v}ZPKDb zeLt!m=dPBAluLnV!s2IcF1j=px#oyFg90YgBdP!rFe?7Op%cf>Duie0m0!-)Ry0N) zuk*tE#qdqDx0}z?%|^r|FlDIcRkN@GXxaUKV4b>RRe5(eQ@>$I)St5h$1wOBjNHg1 zC=cZ|xe!Sd`b4M{@DBddfutPvDnu(Oc~%?Rw^?kx`657Lh|f~Lj-^(Ug+?J&e7(P@ 
ze;v>KkJy?bd7}|W{afWdRjOT5{hbF(AQ*mvu?gO^#;7@unel$G|#aQb3$KXV{ z1h2c_hjR2&yr7ON_+oeK%9{A+RE?{x5ns-EHP5kj>6?t#vGX53h})%=^E+f|km-<2ugWlC4p z)=i>ZTXFtXk1ctF+y3M-5{y&r+9WVCeamLK&uUAXG=BMXw;*#(XbljMJpo`3;#ck)8XVl?%;Iq$B z`u=j<;T&tXGyHQGjK~eQUEvuD3hZufiaA;>U>Wqk>t;d=!9uB12ogJ9L!6YNPRQvV zm%=wezZN5^>fl~c@y5)v9(K&;RIyhnMXEpqm^dk=|F}4b(ym^DqEFjV9Z{qnn64{R zDQN+egp9f7b7$}4K%2$?iFGLE;xgdlky40N3iN3yMHhcq23yMNMb4P>M}N}*0FS3s z!{UxbcH+zSwQwH;L_^iQ~*5vY}e#GjSN|{T@`o+}~Gngw|xk7eRemloTvLzkHQst1Y ziLK=7`Uc?D!yZh!YoO+p@w<)1#iT!($?L|qJL6%~$;pzaKL(~h@r9l7kkN!V1O@La z6gqsap*tNu3M+<{0}LA?w|Ms+C0IUzW;2%(&6U?2@0a`4^&72}r?0qTZxUIIv^R`$ zhcQ$<0{-C-75(s1U4dR=3*HmGN^q|V4QjZI8=nBWZ`;Y<-trd``N@({bl%szg;vBx zg;pzTzkai2_TSfnXFVwuzguvFs!JPd0Wi>a5g(pJebyAG9~< z#47B<7&|l}lVP4aoiBFKBt>9f@+;V?2wLxdQLA+&qmC8B=aa?Mpt4Mba(zr$!NGy% z&CP(jmYkBBoC*y#I2DGkW|3O?mW2hg&HV<1{I@#eKT%>55M(ZNsHhn4le@Uk+|oo< zylR_ z^}sOp4jZXJT*$qN504@g-D3j<+r zA4r?gy&8wM$0XM9wJtOtnZ808Gcn#}>nP7cgci*QPXZa3O6J3tJx=)c&-JeD=FDAR z6AXji$={5*i7eSiY{y;Zh?+F#?`mapv#+V{&fgOc`155|y@zn=z#^3X&ClozTF{q+ z-xoSzTq+Ul-Q2$$9;kYiBOd~;b*vtI_okC&add}tKWGz{V5^8 z_MqiEc!->MnD0rxSM~EmXqj3MJ<2_ne>cpaMh*Dd@9*_^)zy{o=e(&x1HMh1Lh%lP zg>J$A_Z$g5%}@=So_z@XHevV`Llqf^8aNuMl$nIqFhK-{Rp+u!pIID(?pgMFsAg64 z%gsvneF|Otnx7|QynJJFfy5}gWv(|$H;~_tQhw(;9AqumS1#&t2gJR&yVr>M2!R_O#&Xpv|g+~K@R_B}J*QjV&Ga_O> z-7{Y_g-BKZx=Jmv6ghpcfYbOVTM**^ep$=nvRZ?P!kg9B=IR2|Ha80)xoN%CjgR9J z{&QXE!nS^SC)sJi8hX-1_W}G}o)55thiWCCuqEH#vx&a`jIv1Fg+KJ_C1Ypez5ANi z2}7oQdU%pKmP}^`OmvF~+sM%A5Uc*DOy)q0y#jZJ|m3T6!V2a98Wfi<3PvknZ#RQY`eOvY>NXStB{Cfh** zth*LYAvE^Ym{272Zz_|}G-Cf}n6ybesPV&j-%QOv$8;#~c{w=gPBUKC*b-F8U3!Q% z+5WbVeAS1Q;T%KDJli8!kKS-7y)`@qN{_}`tq}y9dD=2V(!AZ=sMeZ?w8ccmXVO9x z?3W4){7l2gGL#wKwLK-*O8|kV83mIi!1wt+92ll{wwb$O?(MC5xW6>B%F|A05(Bp) zPRUVKmNP#MS}8m00~zkF9VB@E@>Ox2iB+Z2)w;zcKEA+>{!?OFkDO^vw6r`o_1VYN z)NNCUoB+wppiadme2x0TQ=^#BpYVjWanpg*@{>Lj?t$m^Nt?96(@YUD5mCw)RXn50 zqs%DvsFTed`bg}^`R_`$ z_w8*=!o?7WPZ!N!uV!>~9;TQ~Ly1!Y*|`--EeX|j%;iWoR9Z+R0ZkK$O0?_o!J23_ 
z3E~Wz2gP)R^iubn*t-sbavug_;yVAQYA9^r@BfxgtnJ3A1>NGsxUQX^i&h;oX>nhx zInXVSFmib%(mT^yyCzblJA)TcXvwR1e!Puj2|$4$3w`tkr|KV3<=a(NH#|tWHze_!zENM&yzzi^ivef5T(2Xc#O@QEd)^pELhiy9?=8VgOvnc5kYa?+_h3t zj%{ogIoMdFa4Xqcb8Ew8o zgdCJ}#al#BsJNd8jj$ohj|3Gg4^D$ps^nAb4tX4iE|Xkk(%!R~)2yjxpBGcj&s=tE z$~{{`i~y@5R`rADqlKB(6BnFF_hZ_(Yx|xXAB$x5b#+1&^-kHjHr|`gwj=6HA*=LR z|NFDSaa8IDnaAM{4gf-p;6KRycD30s)NY@YC(P}+V=YV7D5RUl~?uI3IC|7Uui#(Nw!~( za(qGkV*t()`&Ch0{MpKHqwCNc5YNAiPp?1;4v=hUOPwIA)QS+go*IN}nwKi@SV*Xn zcf67h&2V|8J0k*iizKyP{_eDCYHPAmtlM3-&#qKeL#vw1t)k5(*<@KbiQX@!7O*fX zNDhxT;^br7!x+tXc66?4!FmzQ%)97SB1{$@=OLo4sZJ0B&laUdYq7i#!rY1A2%!nu z-|oPFub}B?RX|vFRZUeBiOD+|^E1!*!ryc8 z)>4|o^SG*%D+|+tg8YcnWJC7NQzL6)MFqaJQg%*#lW;L|GPDR(n^opU>f7G4dlB)c z)#Awnyp7gxD|fnlOOSa=GV>b3a73TDPrA3>I>{OrAR2qx^FHRnLO5;QfBi{|%8+^KS^mpLBqxQcZIM3x~BmtxOB!slNdpwy^ z2=^@ai5VFg*5Q!cZwSi)E#9Ba+{*T+_06EpjsJ0;M1(Iph8s0kcu-Q=$`diJF6wFl z8B1G>vv|5ZVG3zQXdZk%J!WY=GMXtNsVZQOO(DTn{~PfPBY^3hdcH|q49|aVwqbT; zbhyaTlxj8f(b0$c;{*J`Rq0Gn1J-kFXk=g_OldAwA(w2~UBe+7Et+nwjmj~Fj@Qp) zurD-Jq1CbPTB7t)7~_i;#USiEG5o&ixu_aDDjDb(K`gfh@r74zA<^lU!!j5zU`{}X z8&9&l{%yLvta&99iWIJ424d#kdLK4O{s~63lVYaMbP{>R7TYE2Z1WVqS{~Xz=;-OZ zJ%U-e-1l7PH)e-T|4uLr86P=g(9zPj;2AC9Gjvv^Am|9%FGJ_7KKcerI0=YH5-M~4 z3c2{h(SglluXmB9!rkT;xd8R0xxSTWEG5tWI}b)8z>GYzWoe=koFs{g^jTa?O_Kpr zYN~FNMbzKvW2$jhnYggKBJ|{#bFJYrzhj*ol6dX-`Wcdy@7$5($KvV!Y2~X5yS`+= znOh+c&tJGh%tGt62m%u*77;@kLY^(jcKD)VumH$XF*CrnGF+m~UX4|CP?a zig5@m33)n(UeQvuq_GgA(mFmi&e~vbU5&PhHZLv_(Z!EBuEt2yNkDC08}S~m3xF)1 zHGc9jUZJQzIeqxjL+7R>r80hOXks7>!vg)9l2w7Y`f7(}^AORqk(+;BQPe0O!?DVA za8T0Nu7eEJV=k2@=*jlC)L~Tu zEIfbi{IS^b(ycB_xXHA>6!*1BR*tD;OZ`-sI%PE;54Ui;qnw<(_f#lGtY#&u#ve&5 zB)m;&)<;!pe-D8V-(-yRBQ^gb=Ydulvcedyshob=H`; zOH32=RMEroEty?;nJo~N^BkX+kiLJa$=1#o2gH>vp9V!6UtG_Es672%Ce`nvHg0yk ztrHd;5Lkd?59aM56|`hi4B1=m=(au2-gw5Hoof2f7uz=PGvaqMIzivuOl-Y_xSfiu zvos8m(66$;k2J)^3u;t}psC>~DK6hm*%`G-_qpxdEDG*cgRZk#s{Lag5ySdS0r(+R zuY{{2bmI#|)HO8;mFU5eT|aFX()_qU6!R<1Mb-4#z<`=%Y9)qNKmRuS{3w0sC4AZU 
znUir_#{k^o(}V_2FIjR)(x1MnJL2aiyr}R_^%4!|(15^|+lX=dfSr zcTWv6nat+=TR~7x@58IbL(v)*44%z#=?cTBuh*&oymV{hWbg7R9T^>eacybFNS8A5 z)zPImN<`)&A7Qy zl`RApL+;Amy-r}d)!k#QNZ{yIB8%BlOjDv|``FZio}-@5(}|wnS(*1~UBCy^&_0g2 zW6Upyo%J!r4pd$ADG8v5!5LHvlD7FVhBB}OhE4*HMz@<5oxfiB`t7fxS0C7X8lT$k z_-Rg*5f|E9K%Al)jHgJTTF0bvS=;^)IptU3c0SQDA_q))pti`KX-J7ogIXpIS3k$< z%*&m~o`nL7y)BzkH5+Fs3K?9&%7%M!)C0zidjBec#XUzjSYZ0`_XBB#Zu9BSr|rDJ zmQG%Bc#X3A2}OFLSqMo13`Q5g92`OC-9i<`jY0!ScX)(kXc!9c7je!{qvNS(6u1QA zIWU&zP8Fu*5Fg5W4e=;X;V5ydu$7U^yGZ^|QbDk=*wu1jr!R9W?Cow2#w>spk`@*) zPwt}JV=_jtbA-?nABl93;4hW4a_-wYRAJ^O9qk?n&bLm*o5r0v2oov^wve^norHf# zcO1lxT4ojAe~cHIf5DUpu+CHuBlp&TZtyEIqVv=iEMcwb>u4S6kl)^N-yu)_Z7r$! zH=y01*j*{^$1+bs0wUZ1@MtzH{hTRQ0xtSK#OL$QJAP0qlA;amM1SjgzJvk{(rq}0 z+znX3&fD(?8uw$f%X<=|XOaOkiag2Mk$+nQ#eht*l)e|g5b0aOl8V<)1I6YXwKJ6# zpRAqDRV)|Adz4kZr-(XtT83XQS=D|*2W@a9Vzm!8jA$0eR;(g)kNuDTw6ztc?*&eT z&qymKezxTrpU|5#GCki&&UhRP`{0*CV@>7ovO=KHBy*&9i2ZSWdSl}>?R24W#$)hT z>0rWo;1T6Z#$3bLoRQ%XDFhy^oT{O-p>dhzQb_z+0-lAM`hQB@zhP*gDZ;Id@xMJl zTj@FlN33Q6qGWk?9esnlsp?~z`gDHOBn=sLboeVO+*s>}a+1Cm#j)ipcPaj-tyA~c zd~@33+N)ho#(8UopEjy}g4?OobrRI!TZF zIc>2tgU0mr%5yvQN4POD&HBx1Q0nI~HT=`r^DFm})kQ*kb~|)z?^KqiR@603+VEnG zyf2an5{k3YHo`?)-&F`$rn)A-e7Y-Ia}VJj(l!ViCVedEJ}+!@TEIj@*g7NL;oJ;Oipo7U z4gL8z#x=5L(sEbruBxL8^>1&I?D7EG;vDgZmb#V$C)4_<&)nup@{@e}(1K+%!;uIi z73eIqZkK?kydsRL=;_e^KCb=;1majKh1XO*Z6o5jvwIF~s9!AVItg)wgJ;-x1g)r1 zDG}+02HBbVHjvUo2Z$T$3yZkGWWBQzL)^BOM;b`#ZH$fV!colZbqrP1ep3iu?lhkY z4y!dTbpRe9-;(M~%}yuD3ZTLP(U`L<{bVN*^!z?nW`IueJ0b3_#bl@oP&r$=6`B+6 z-p*gNKaPCeiKX~^XtQerlUpuPp!wGK0+>rqo^8ZCG-Ne2q^H3^YBoQsRo~V^6b!=e zddNnKJB05e?mLNfH3lxiADiD(FSETBN=z%fw6M6?EU8b?{lQhyk<9QxKb{5^a_Opn zEI#aUh@T&F!-w1NcsV3ttZgZ$nFY~^02QF}s2S_*p&PJ8*G7{)AmDJx9>{(_FLlDXIJA4FU#x03dL2T9aF_-*s5aq+1S5rCs#RF3SH!IE^DX63pm6k!* zW{|Y!I2kX7F`>{B=_qA!JA%Y9`TcxR;iI()jqA5&KV|(SA!#JdL zZ5^fj|Ku1?hKMvB#I5tyJ0{LMJ?_Zs+pJB3*Px#Dui1uv4u-Bkj1{z zbfc}dPYjqVVf|t&B_u_c2i;Lm7$33skaD~3b%?dRJm>4 zE{IzJ8e~#Y&7|2>=|}dzUgv7yMMwGU%8w0e6RQL47$g4^HWh5EGcwwI{@WZsMBdTW 
zZ2Qwn-!)L_t8kD=4NbknyjE|%{@wC??A2*v+QZ!{3{7@u&}wI;2fd|Oqm9Cqys?4c z?UZ%xx1UR>6(K!ru3J{f#@abA=ATJmI>O+|#;<4twy_wI7)}?>r}YJU(Dt4?*E6l$ z1zeslHPn1OInf$Si5npweIvno0UzeWV{H-esAJ-aR6M#g=l%|BoHr0Vga6pVBd+^k zMP|rh4+-NGY#29~9ZoRphgvPXii#sft#oWMA;l;iarZ7nPIuPlmLwOGjv7Y}KQerd z2)b>tId56cb`hg=5%$*jA8Q`ux~eYcs@P!TW1pQ6a0J;Wa{p1W^H5N@^%ny*(6av6 zzM_mhl1a1VG{i8lcpXwtj*SCyi!^BTDqUx#nH;L{m7x#mTzYyr3g3N5n#t=WJa7_uvU-$L@@cN91w!Yb(JEQ$1 z;G@#goqT$h+mT)>@tYHkDW{Bax0;K+?{ zOAn;9s9eL*EwwneordA%5AG0Or_NJ>R7)wYUbf&mM9)hy@y)4G6BYMYEqdp0`YaW+ zdb`A7!@8UDdHBcbuob9wgJ^v%(?2pShyy5|!e83mHsaSs6TvD14Ck)HbGo9yv7&3E4Vf5podck9n%Ndg5>ghL0u4`0@9 z;!>$=%td6_CD)s2XXv9@XgC7LLj z{;RG55h01U)oO~qRFF@8A3obIXcv*ckgKoRP=c)JuI}v9C9m}`ZP{fFusv&gJdP0f zB1`N0?=w`4k+aEN6b67ml7Nx;_;@jX+7+xW1Xtwx1q3$hpZ7ULJoxyWwfniTKvL7fI`!8$jl6$t@b6ew zv5$o6g4_xHyEXqw@xZ8S1hZS9cASQ{my2$u-M)ryh*D8aj zFh(h&@y^nJ(n;h__u%^Xpk}Tm^tA0}JhPTcjJ6*8FkJ0 zo9Fbk7R6f z`gcz%x@D0LydIbfVzhp@6Td)@(tzs)7QVrwMQu-{dgHF{2)kxCSBB;{!LYjiN-}&} zxSCs>nQb+R_>qJznGk}6GGW1!TZA8i#`%&Fm4@)b6NL&lWW1)-iTyqELy1oMZ?6~@hv~sCk)!IhIyt@NGT3~8@6Q2sq-UNFg=*XeA-v~?1vod z{%9{r9-;kxXT*7QO>`0ezgyTj3+P|Ll9DXH>=m?u6jo%ak($G_tzTKqog-4(GjVH% zFIh3sAE>me?b+fdce*x0TDl1?AE? z5EZcxteu^kO-fF$dm~m)ViHXkwYplu@J~<|2{iNJwu4k<#kp-IwT-zjMYv#Yk>B}N zuMJP~F_R10OtG)G*U}QWfJZiodnY!k5Xa8NYa6~ccuFKdR@as@T1~SSVBr7%(~IeuPX`xMr_`BN$SWQU5T=I=X}T%uAaKP9G|zT$8sV=*+)? zK1P9p^`_2;0I&EPL2k^-q^IeD#-zfP=DbHh7OVqJqKV>01;puqCgk8h2xPKe*X0op z+{+++lv-TPP`&sk0rIyZ8I-B2!fj%)Hn*MIp6M_-gXt;|D$eDbX5pphZpY(VGbOg| zh=<=>-mx^VX?o|Z&}gdJ|1?D{O>WbuBspl;)UZ$WyxkHuW`1a@V*x2T`GG{UY2~-MrqDm@mAymyaEHM8u$9rOaTy(oED`xq=uX%seu`X$4vfCzpeWmXkv4& zyS?RxjoTK@n0S5n^;tI`rHt-!oy`32XPlN$;%+@}DPwER?wf<<`HST*d{Dm00z^k! 
z)}hLqP~UJIDOz-GSWI72@O%e$F;&b}DIFT4c{A0W=-PHW>7d#gv10 zc2#d_;_q{@cEV^N{%tbA-(S~S6M{qi&^^#7# zSczR}2_+e+O!}3OJ7V;ZhqmLTSvg5Bm|i;aC*gJ>eN$s7_+9pEJ$vLdcInr*6v=-x zzX{n%F|WC+V4}n!D{?IHvv>2cOWMn_3~Xz+y+38xULNdTe#qj6{C+?(4^a#3|I_l; zapyP*Z*ZS1)CU;8(h>y-)l}RBL@RIacUfO)NMats^mdFr^y#)b(jgP_F1Hz4W&otfoAP4k z29&3vox3dPno@;q*c{B68Z}+pGiNKSPkllEy1sDpr(Mv$u&L!>xia|aNVgq>1f?TW z>AgPJy14gEKq0e0&c)N|V3`FS8ap4&F< z`{V6~DqhtS@v+9&q5&b-P#SQ+cL5n)_HK>j{P^ily8@!TqTJ_^(b3WK^TfX@9X}uz z7QQW{&VP%}rkeVc2Q`3cxGPxdu@gnE2BTsgw<_e!WjXU7wPJF8zzooOS#s;6_z}Nj zBx5JLQ!O?PtsvO)8addn3`v$TDDk&JMKnCq2>1uob!1^%vE97*GH7(-rmp+ zn&KlY%|8!DJ3Ncq|Cu*`5=%kUf%0eRYTkHBF=f0muq=q znh+B`lI}y5ddL!f;XXo*i|}N~L8yuyI&3j)Pef=r5e8}=V`lrAPUfdpWt$;%$I|EGJiY^W6 zvXrfrjZOXQ8+dapS=G<%QML4jwiIxjo zBhsuR>>1^Tn~%#SBc|?7W%uDux|_ia3h8@~ZZXj@Yrcz5z+;5<^|z4jTni!W_ngS} z!`+awUbGFw>1k|RLpryzzV^4mhMX@a9(3`hDp;U$v4a8O#_cR07mEi!$0yveHhjRm znybdTnYnCN??5KdoMY1D?C)W(IIDI2+z(;LVDI)|bDtfS-u^IhT%x4JFn`fae=U&y zl2Ce8+}Gab%iUdYc8;+}qGOW=3GM$a4T%2pjwA#A`wKWkG%**p7qh93oRL!a`Kxtn zwCXZrNDn0$GHt_dTlB0&T8q3WtvhThJop~BhCew)Iv4@k0meuTzvfsurfgZ6O7Sl} zH(98O_z~(zEv#p3V5DQjAed$v5j`>tgea_jWUmzS0Mo`CEIC)%t!4H3rW}b0`u-S@b)hmBKH-n)yG z1INyJ?D(0-ju0{DpXUSPe*x@o!AOl}*=i%GKga)xax~M10PDte>SqZ%AaZM1e=aQC zXVi9nf=KjJ&<2Bstky(48}b^tOZI`3iU%^%GBs?>i$G*#@XW1N0BTQj8&A{iKV zOA)YaQ7o@d{q2l36|68te`marqX!&>e&9$^!1(x626a4D^ z8KY-{)57yki%W+hT2>N?wqJusfj#om;_E_RuZ_JdftWV`JwZ^^dzI^0W#$M9j&Bt9 z5KN39_na&5eXHItDp~2UD*xNodSJ)!G>M74MG2bLO_|fx1uO**hsT`nhex}cKB-Q~ zH#F8+xqRGy)a+Xm_Ux%y=+cM+u|yNKQHuKPUImxtzL3`mk^l$zPFB{wy{l*Wb~4Qvnl;B_9g>ZTEFw#n`tpxF4*jw8-A`c?Toqb38X?rg+zJz-)fprh)%ZE zR~G)RPbpvB-_5g&pvA=JV74hQBPGj0tPh8~t5Wc=*ZsYCo0Lr0YOHkw#48jwr}`B6 zG7Gk!z)!E=Q*BE}y?>7z6;&KTz`-iGT-R4&d+m)OjcC%M1ru+WeJ}mVl{Rq0SHMMJ z62-V&Vp82h){-^@gDJf46Kcm>M>mIiD5$?vXP6Y{ubd>PFjUM775Ce}XxsLh`UT(w zlZm`r1-){@*ZkVL58VY7)FOvPRl}iipdi2YKMXt3?td=Z9G&9_t#q$HnlQ4`hOoax z4WdzMf{eU|w>Hag5pjHzmF$#eT@e?0&n*3bu=$`TFuYLE09hEt`K3;~75$znE-)SX zrkMlK*5?8pmHCF6f?s?RPpWI#gw$*m=vb`aP8b%`B|ZU3K$x 
z)YskX4jn^|oNRCT?H*gjTS@SLR%dg(0GikBN_&b#$B@Jry*kX6tO3PNCcb~Sd^91b ze|)}YyoW(O$+$)Rk_Zbbjjifdmoa>I*jwUGbqU^+W(ki2MBIZ3A9o%r*V(Yu7-jtK zZ#L4a5)hRGroLBOM(poGm^oN)7Mgd+>|nqSxg&0W^mu&1J<6MAFJt@JQ{m!lR5UhUOqFSU}gJC)~a$qm#N={w!A>A8K{sUncwwgbj7g|k zY{$DO#@A`Dr~l8e1|6E`bsSAdVAsoiWy8(BC|~BlnMr1p!T+AL!|~hoOH!eq$L+p<)0eC?D<$*C9N*&>OM`t3 z;#P+P>Kufs|9OV>(r=AV0!H!>5B^ZE2c3Ocy*&GFXm1FnQ<|Lq zJ?Vd!;XnV&y2BSuyS<_k7R1f}rrCz{mG;W*YS2BO_SCd8T;7uBIP0zFk$eif`YR7s zJCQx|hd*~&r60-(zbqa6Nne}OGq%O_{^YmH&jAz5SZMRAaCArJvlqfi*D!1PeC9OQ z25(XVn5|f4K>_Wf>Xh2YBT-i_3ZJLwJksu)?%JiJo!6^lJ!iE4w{pRUa2O=+Y^@c` zDX(ig*2UV}U?Sl+u*aF=$XQhO@8+(Xn(r}qn%=vMqAiB=skf^d>M4p)Qq|hOd)XOk zCiyH%1^D^1{mCmlt1>JdRu{Hd`T?=2J6BX_-D#XC@`n5IbY|%Z@wFFtD0%PM-G%l2 zWG^%6+A}hDl^H1e+S-2CP258XVhB;*hdYY3KdU+j6&NZ}wB&W*m29kU;3IyQBG>Ka z96cn+7{PG)Jl3?y@2gt4hCHJNKra~@n&?n*2+g)#pIr*CPkm9{2~*a)9zX>N#^xU} za=duIx#Gt2ngm1gEpZ{l`!ZUGs^Lq^Nb2EhliJk7=M~sXR&|i-t=i>ARoUg!+gbRX z6okWB=|4_F&yQcAtUMb%zh{XB9E3+!+pmXnAF2Jmgrh&OZA{&Vgn|GGoKf@I|3}qZ zM@89v@54h0(jXlQNVjx%cXvvOG($ItN+aDMARyfgosuIV-3>!`hy3p6^XT(^-?dot z4>R|iIcJ}9c3k_q&NEb5__U-+H8Mkeo+Lu>9vC}=1I#g7y=H+%@WKMhxc~@r;mj4)?T7_ zoGw7Lk@X}9aA=OFMWwvp1yLyNi}klBf0}nw0E8oOPT^S!LL`=#96}rcC9T(fv1z9W>=Dv>B%OF10z06K>b<=C2BD!)-}(~rPJMuwYFma-zpL%W!MxEWoj`0mKQA9_utD^QhhzT_?S>XGb-j+&mpni zwrY2&F zVa8M)=9$)-*3~vLd09o{cnz=+{et1bFFw>Yt2eIoXbBUGBBXzIegeWNf}v*5mdVu+kYkT{tvO<+S5vrBSUb5uu@{ zVA_K)o`B=X#*(a+ z1ZP>(S1HoLT>kUtXl3=~fp_?eSl;yTHU9@KP2=0;a7m7WoW(X#ArYh+YLwig)qu`Q zc`d2#-swe8&D$Z${D8r0%PI`}_4i^6FpN(#v^w^XWrqbeQs> zlp`UcG-uWJsjK!TQs<@OuvMe@Z;%)=dN$U(%2)ols?>{@Ys=+~Kbi%T4Luy(w{Bk} z-^&2oJwSqHi1i zB5B7+$Wg!HL}q;PY9~!eC`%iUo9LoE$99(>B&6UU9(Qkq?55+2SIa}(KfNKpaW$M? 
zI@7s_NZAl%J}Jj9Nvc+Na47 z&->(2D-y_h6qA6dqC_*17BRk))9!yIt~}NGV{)=vLxPJY1Ad^hzV?z7lHpI$y3WFK^v@AHuHkP%_QSadpqci%3{g zGHWE{wP>ho;&C+4^}#p8Nd%zvu*qC0bf;ruWxPp%j1mwM7_~g@01|^*%Vq_R0k6Su z?jP=lp4T0gD;HsU2;23^xiS!SbXCYr8KPrdwn9M>Kv==Mq&1q{+1g}fSD zc+`WGcGz4v}yJ$K}XX2Hh*^K2j?0Jx6PGhi{>;@kbOJS5s0dO@M- zyXUifEzO4KVam8DgKbe{1GR$#qfWZ4SuiXG*8D^Qg3uroYfflQYtu}3sjaf~wAsd7 znk8-RARlKqO*~sQA%hr4*6m4TWrydT9&obW+o+@_C%uUZpZf?zK7zvj&0sA1xD~`; zSih6gkIRE!2u^PGY(L(1wBKk8WR0vG%A-)qYnW40qr6FTm{S^BVqM%-#MIY(1>Fg^ zoJR{BL10;UG_sBy!A7K4PvT}V=Rre)54>B>P_Ccy^t}^lb^C}`UE@-Boj%8NUc!GJ zt&B2}hyd4@D#ehp+24~7!}OF!>n>qTsH1@95DN=72@Wl3X?nw4j-6Qj5Ao-gnz@W`?x^maUjHxmFf*+KM|38O!7EJbltCb`K&^?ekw7CY z4X8?E-6QNOSwj!1-_R+_Ype{+{&uj3#oFQ<1ZE_E!gtN_#D3xoi7@1u?evv?^CZ@WptQ+uO95T+gFSUExhIS@x4JeZY=LHbR6f;9|G@BR6n7 zq-aDYD=P!%=irr0P&fo0pXVj<<*2 zfc1-8uwj*d_fiLJaBSda(a+8w7?z8)yt#vwx1GPcp`^RC{6#)n#jK$lROyw37`p zNFN;2-1NZw&_JZG#7kaWe3>gcBdMy|Kqh4Pkp*MPU$UdwOP{w)Ta#r`A5p)j&i+{< zouA*+`fgH4PaGGi$GIPUgM#md$bY!p!2$JF}CS98(@8JJNGL9S=?e z!5sbBvg(@FvhQXh1fmU73fk=N_gvia5b&2Av&673tu$ ziNs@5KDs6-aVj=aEEWE%t0d(I0J6J87rAUlTH`IhJ@RxOf zIoTJWU_)SZ<0m4W-1f1hC3OikR84wtr?2fk_E>)M6*@sEd=aJ_+5Cd*yW{N72(M#v zF1C2O@u`!QT)IA_(?ZV_l&xXNHS05YGF}`BG*Okd;R_rCEJVwFq_&>7Y=I^^^BDSZ ze(5!P;e1#(j}c{-O0N@!HRR8H;=wX}a#bZ{IM^q4B?HRIOFIfW-3M1DVO!cwq{mID zDx5bOQ~=DYY>4AUi2l_)79mfu!9m}9udW(%((E)w1;)~qrl&ADE^KtXP)R_@#^cad zWPbV1*{rcLQ10wjXKTs^ajwiSArhW_f{~5wHUXk?ueG(oG}DrUsitiV=Aj6E(@vzQ zgHLD|(ai@-Zri@LD2Paf$Xp}as3fM~@q3nm=%7vcT~M^+tQpO?0)UO+C^*+eq}Q`w(D2-*b+HMes&nOQdOBbDy@J4$%DvYz;Wj#ax5?X*N^{O-Ld59XJ$_R zSuJBDEX{STJ2CJltv(V*?R-WCb30RlBjKpl)I^91Zc7(BM~4;q%T=k&^3*$o5|$XR zGX@{_&MQk46%F?7^5VWg3TOOv+14FFtV|3VmT7{jdkvYVW_&~#pD}to;7{%_bh9Go z0;s5IYx$Aos>@?{}53h)LCbx4@`Qi2|C4r}|bj{$e~ajGaK<*kdfcGK!=0PVVa1^AhK zwxLmk)i@1sM8h?N*7JZgEAw^=X3E-&yRVwP z^4Ub2&$E_YkM#`WDBTXbJA9&>Jn|*Lj(ajbb94lSz9Mcc;;(X2wKKllJm{W6jM8Aka(uBwl&hVA0Bdzun1SX)Vdq{DI>h;X|->NzbYF52RIi2R==csO42$SuEn zS@464CBu7#lO-~P;Ci{+*B9wI&c(f@egs=@<_89vqpt+jhicU0TADQK{Mib-&FwAY 
z?NVVus?fbpSySOu2nF%fE)X$HxT>@J`UNc=0?K%rX-dZ@jT2{!Ay)AtB+a%2vQ+JAYNOUP+E!`y`qH+N+DK3gIY-ww z%ZNvZ{x8>b=x{9y5O2~koEGml#h=F3Vx}AVJ|(9QEf+ohfPp15UdV)76?X-R63KlV z-xq-kPU(m;2s0B?v~hEsWAa2ZeEFpgsEpVxytZ1lLh+&3YKCktF91~O2ay}u_FE@ z_{756J15cmz1y5lP!L2vF(V=HX1h2c%K;%@Gw|}X2u+hyO0(SlYObpWe0%iXvZRR9 zg0q%|d+MHKpLJ!QXF^|IGi!h2LClV`z|A@U=y+Y6_qyJES|6UE6S%&Ljec6+7$qpQ z4lgVh^$wUwwZ-Xr4kJD`!7-cTnZaOH41JNuXhLBkM@{hZ$8^Vg?kyTu3uCIR_S5ev zvpjJ$U#K&xv%7FAS7#Nlu%Tteu;@X|e2*YS_y4I1Gp4UoW7`^`i-BL~rc4 zvXuUHBF6-9t#bnq?&A7(6<&1x_;`JP%lGs0grC%bq~+8UKOaB8FeG)vx~8z0LBZC1 zt$n+2y<5U~CMPClzM*aDHo=>|K#BLN{&3gcVeg2XEITu%VEiXn&)Qg%q_K7Z&ADQA zaT5|1(<1pHe0Yq?u*e>%&xTk(a#4)BxX-74lrR3|<;U3qd3TA(kBBL`haO#~m&gP+KPYb^C&_4>dfYtRkPUGCeX%9CdAjN>lr)V; zW1u#xXFOG3w0pWbkizR&nda5H2xD_VnKjO`bN6Vd=bjez6JQq-CRY^jR1~Lv*p90> zi_e?uElB$)ombY3g&?A=iAV9HuB=Kh&ICIw6n-%}Moey=9epA`(-iSsPZ#!Uq~W99 z`c?jNx0x6Nqg#q)+OUvhko*jYCN{f@QUVDzNOM}s+GpCr zZ3`Dh`4Dg(qwevw8xQZ4w$8&t7MQSdB`v0xD!B8s;wmPYmi6ZHv#^hVB)4az1Lepo zVK2Wx!#*zB?QJ_KG-Llg;^k-DXHJTtVUk{N>OrV*}? 
z=z$gWt#)qldvlFr@yC|d?#D@)?-BM}kIyvphn}Lpfud0K;~T+-uFvV|0S?cG)8D=S zCd?dYj-wf1(s3sX{jnM#R#!<+B!IV15;+Vzy(C6SIaWzY+!0+5PpZ2g^*XJ=0f-t2 zQZH>u3cie39*gV(3pv~SQjT8&2-(e4Eo>|#856jfC$^ct-cqfJ(OLjPke;qS9t9va z&{n3u>xPOt_2RhNxl_Q6qfinpBI*Zm{kR;=sE3IeNYMD@gqOB7=BYaeOE)FQXl%A z|C*YaPrzD_qNyRlxkoXM3j0?;^n3|HlCEW5_0{4^Q&AdN5EoVCfSiaV=dNpxmrHui z9<`3ym$1~KRzE-)?M91u5vSuaqXn0W{&91jTwf!`qv7|#Qtjyl&QhHfD5vn5UkS)d zh)d$fJaTEoI*)~6m(xOK#<`gtSXR@YF$380bBo8iuY=7PGae7_mgbtgO9bAqdDdYc zlL@+QkNqxl3_=|7Fa0jwaUA%27prMC>b%?RV`@xrb`Kg*T^88* zR=qgn)s>2J>klLdFI9CNYk32*VND=jilrfRtRTQJY6M@#K965>lHbM_s2g(sVQ;?tD z-fl8Q2f%J}r<)BT=ayGHKrXG|mD9&VDQxAqr=Gz31vf^FBUvm`<6<>@pn>s5YgmxN zNx5sU8{Sb?9ThHTd&V!m1&PGk^q&KivQE$&j)#SmKUG}d_zR*lL z7UDv;=n38#Z2Abq+yFz7BisV)hpyAahOEEjx#HkR@~7?)=YivVEe@p*E)At4r)8_1Dp-8r z)!Dt*3GdOp3c7D4dz#wXaSlL}RhHh7K|A4c z-h@NAGRi3ufM(imMHsOJf`9s;R+p{!FqUd_YHTjG9N|uo9|AgLP~@#^bY$q_5hLmX5DiluInOduL?y*g zU=8YcPH(E%m5(<`%V+SV)XkQHc(A)!AcpY4-D;iI%xQzJJ+ozo4THAf!wn^Fg~cjm z!%pEXbzQe@V}8;|ls#1(DnZ}&VbsU#o89av8>1E8!wCNK(@*5iCP)h&Ca)bxuc=`p ztg?}!X{t^(nVTv0rRp4czE^V0aIcduNGjB~P(+8kC+%>Y%5V{$HTo5mK zR@=Koc3&lf>e``$jiqd%o@N!}30Yv&-qu;u_&~lKh?47I5W}j!9H+yB7Zl1%On%Br6bE1_uu0Fa2aZ6i*T>v zJoYY@FFVg+8pxyXwlQJ7((3`^jPmTuhQcD{4M{nIAD5(Pu zG5Z0TotY}bhh~W@QM$}aZJCbV@+S2o;{RLios4T-nV`6$y6>GVSgmA3(~g3zt-0($ zwt&v*CS-g-H`!1x4f#W`b7xM5P)0`?UN#0?3!++cq8!jPR0F}6`PXJ1Yd^x>6yWd} zX8D*;u)$4FzL0p?#lPm4AFp87h%DE_M*nZ*j_rfd7RoR-AUUHwy#zlZyM&!YKX)aI zF#Gx6=j4)ifgZ-qUi!LJ&d*!_?~Vx_6QyHoHfQ^{(v1QjUxMKNJsN;StsNW!`=5X0 z&UEX*c@ZLcnCr&=eQ)URSsu(zJTgC65s*nQL;5oaiJht12Gly<0H{)=S_%Ex6+$_7 z2>iPUDirff&i2TgsQ<;5z{_JbU>6d6-YFp4vU1ysV$7S#D0374v8-u#)GB?n0aGF?B5?l8ES%43SXsUYpm@J2$EUpSF27T5;6#b_p2`! 
zYf<`0-rL30L7wwsVyUX&#QY-s-{J#5iiw&q=p%Lna|ifB4s2Xc4Xs6sJhyJ;GI*DM z1uhaplkgK+34bKtEgRuOdq*-7_-73$FG}=TLuf*o;&dX857U2e^vf=cK| z{e?g%3nJ6iY@1ka*BCpWo6oF0 zRT5+~?x19=i|4!xD)h}+-N_(SC8B=ky35Kzd^LOTJ)sKgOI=67oYeX?@=a~NT z*uM+c=CjEXh$>~@`dFC}v1X~;LM89%lfBcdLOrNFnz(`TpV8=!&f~0dQ>3`-2EGU~ zoNn*?v|Y0IyZUr3TwY#13>y&zKZE*@9RlMvthWMr*l<=3)~ejqAIh0`t;Mu47Y;S;(`ABL`CQ&sVTUxLg+C2M%l;jDgIy`Z7zJoLC72)t{?Pt?)TbVRzrYfM2@f3= z)OYZ8R5ln_TuWQ~F+e|0f8yaand}ubKL1=3elb`k&!!;PtF)Gv8xgG{21ye`KzY~;8Slxg^KtDtv>m*$1y|u>o zNc?xtms)>t_+ql@M&$qZ&28zuM z#Sv{(urwb+|Gk=kBP|94o*m>>Jq)eRuwthSbWSH~-+$Agm~h-X-^=EM6t9yUj9P$5VO#av%2IsfBF%8t>R z!?XfEu3rq9~p6d|Ff)wODarfl^v^0M<-f{cb+ZE?xN`GQQIHIeY_#0*#P*+tY zhrg8}|0(1=Vtr?>=l(_U#nyc+N7@0;=dWlzxqD~h&JSX{OnA~f${Ej(`k>J-XbH&>2Y2p%K2Tx9UakGkA3FQ-0H2%K z>UFmTU>J*8*zl#4*qCt@{7af4)sFV>Y3+zMKlbF3E~FG`vjy7t=QGe`hIc#fY<69x z&Glx)`qolO!nMje_{UjH)B-rOLc-+i6(R^t)0?*{M1&_qrhD)%^S3&(6kTUpTE zEYMcnTqvu3Zt3;l*{`d%G?v$?%uvO_$)Xu2p!~63;hbNE4R4v&Ay)h2%yMCa*$M(3vG{ee3-3XqlRU=bv!1-Wet03hq=lwd zGLo30akN$K;793hdworZK96feBb07Eiq#n{7hSo9`-uYw(7dB3RVknx2b&*W6*-rkt< z2axUeGIphwruOEt#xU#}NG?y-Fp%kbUs6$LzkjxLR3POlJJk?`J<>=8wTU6KFRf%@ zV;HiG{#EA>WIms+NfB=ysPQnMAhGm4ibTI5K)rw3JmgFshRaNJ zB${Ruz2~qXV%q2oIS+BZDRRm>)8K+hX#N8mJrW|HbuLTI(D8SeDz3OnKO0vY6BX=m z+xV4mn&!QR-@a@t`ai4;SZ?`uxIYIEO{k`*Mpnp}L#V@I|-Ye>KnMK@tw&JuYVx6tAuQ*ahu))ba!{i_8CZ=Avt0C9VXf&NB3FK(>E z=jkxHc~6UHn8>in19TIK6{uRApp{`)`ts~V=Py<2bdGavpuApN8=RlRadMK+Z+U%#y%kq4n={54Qh|gG+ z1GI)=``^qDaLLN+IDAOV0aj5Q#8YWWqgYc&PizcfHk@(S){K0eyDrz*Ty}Am<@>>6 zLrWWJ#hZp1w8VZ{rowFK@czW^{qex;;sgAz=z%x{v0P6}p$4MYBGD+~^h>|F%-O)t zy1PD+Cgo3hIeLyJ?eY3(q~gags+fBLbsHF2nXsZAq|R{aCe|D#S`m(mV%2y{L4kgod< zR!uNZ6+9CD>jPzc;e8AjM4FH8+7Dc(9=+8&h+Ob2+TP}DrbN89HISAc%xx=2S{oaz z7`fFGN?V>gDMN0h8c0+~pVu%fAf29k8wF2~6gOXkJ5kqX11nxZP86{TKfB}9Ty56Y zHq^$6?$g+n_5bj-I1!#O^xb^t%@02+oadhh_I|2lMJe;)YvsWQp?sl7+M9h#O3k>1 zTX}8<6Ss@Sbg;H}0`~SPud1j~`)@H;hmPdGO3% z)23?~JUiK}t74HThP-l;;RUcVoO1_;vRHr*H%~(9D75p;wjX0X`A`S}=GwVpWX#Va 
z+=3kmUZQ7Yi6M~MPG0An;FLJ#I;zW0OcaAJL#V;F^|;9c>B&^(HbHa?pB zt+F0IG!XUpsp?@XSr7z!lnV-q*401&U@r1`U_}=o8ZC6CY&|_FumrOD^6~6&e|(fN zXpJ<8)&ynQ?b91yhmTR;YGG43~ z72en`9ZhS9ZhwuirN$fNG1KCRy`&S+E{~1KE9M$knpgjb0z0G-;&`($p_Q=!;EHR4 z2Agi9vZwG0YQBZiTUxsIH8?116W~6ti8OYF8`qcQMVF3)4`509Wov>U85|_o-r^l; z{-Lg!n~kkfv;acx!kC2*f>VC&=3QOaQlD5@BBIUx)7_W(uR}XuUJsWFuLgn>OL@)O zxRUEAzLQj<+5MPNr?H{Hr#}OIFO7twpI7WQUsc9Jhvy`QeFhz6b)l3Hd3w6C5e?tH zTX|S}OO?&c!t&enX)CtK2KID^MTpS@kD7T-{#lX|<)apD#?TJju&$xb_=KpEKZ&QJ zHdDHR1`!UainB*$Fj$5t_hqHVjH_-Lzn?=-kPy>R{T?M%oC4T71&<(zQPh`Rj2iFa zezGikX%6Y{bu+(M!tg@^#o8d*)@6drVSBif8z>AX0|ZUpC5tZUc{f@gcLg90BSeHD zoPBO>)rBQ_)Qq92s?_!q@g5>Uwr?Ewt)=DF$BuLK4GhvvEv0~Kvh zhHql%kOk*-)B$o-*&>?|KorEkTrAXqC=;ALNH*6(M}o=eSRQ-en8qAdFm4nkNB?~Q z7a+eS@_d5)4)@Al>_?CYYjvt{+n9)Ri9Bt?sF%*g=2cc;;8Y%)tk{-W*siMW?fdKF zH^h^AAp%v(PFQIf>oeX@+IX_W790ft=!JOc3k|=+&I|B4ZoyA>MW|wIU44ww@CYlb zwcAq7;nx*YeFN981--}D%EK%|#AM4r<>v>vc7uLHLybhCoBewLs45S54Q3(H!^1ck z@&QT5LXSRg5;flM_LcXy`R?0Y^Kx6R_@6tfI`sh(B=gJsE_@C|>8f)Fli^V!uOf9* z@H>DWs3}3LjkL0a_u*khwU$<9i{uhm?@9zij{GzW6FFI#Tsx2Rk0PwrbbdbbOnp2( z*#VN^%HF$JBUv{$rTgH>Q?t&%-#|&CkEDkK>A!SFZVH_fDi=%|DGoq`BP%q5oL(;g zs>4?U=9HnT>N15zfsb=E5)5z>!P7cg09i}HMVZx`zO<5oT(K`;5lExk*6$BWw2Jsv z{TZMa-NgAKreJrZq@A(Nx))&#hxWEHMI7hV)VWpXN?C#7neP=wS5J;vO?{sZswa>I zG3y~DpY-vb`@3J~u0A|e_cST|EPW+MGsqDY$$RvB*B6Fa*Kkl?SMv!iF%vN{Fg`#;8?jIBi8eyxzmE1J@wTk&^4 zgdJy#E;bZ35Y}B9fkK8m{>7yw38TXn0E{D6^DV|p{2lir_?^y@yeNJBYM<$ zQAHD2&z{l22vC9w8N=P0oh*g2<#4@Wf@qc76=xrru3KzU-e&odK=Y;bms-8fj{rR^ z5k7Z`yf56!-9%-M9oQiX<4Z;U;P4;8$$fcwAzw!6G)qVe^@PcWkzxzRn;p=)P=b5) z;$2wP!xcjLgpr%A$}?YBHhDTJo# z|NLRNuNyZ*gdyO!3#6}hhL5xp7j-4D`%ntta#XePfc!|0k`0GSd*V03GUJ2H#kw$T z0CsOmLYpgwcWI@J`V8dA*=-M>=&VAm5PEZ-8lEt1{Ju5n4z&Q$c@lWqmFa`O{~E zl51?JjYL9UklrwtOI5l4MvN8R2K{yZ{xak{vy&dacM&M)j$5L$lyJ+B_~Zp8&#Z?R z#I?5+uQ#Llrvp&M)RxIFR`Os-quOskF%h>yt!W^$3+FBOao9i@O>1u5qm)q7hnH5^ z_Yp`aP^iEv7JN!`!t4GUr5|?BJA7$)#nJXvk)bRxKHzlczKqyt(~0u|7yK2HeS47m z2=t3^qDAs}R`3y_8u5A8HJ9CpuZTSjAigV^{Wg-^A~CDjpJ8&KVz^HbPMfG9k#k8Y 
z^Pfl8H2rKf3)fJlOZSDE*ZI4d>(MNtoa~I)UbwG?t#!->e0(C8x6v&Pw(9P>XsM<@ zd7q;GM$rawU(4;Jzz` zL$+T0&5Q)bnDm1(nQGC+Ebk8Xk5}LRm#tg4PJgy)G~0V>@;8=t7;|cY`+Ka8@&5mE z$BG&77@(#Oe`GXyIJ4VQr;(xZnidgcqtZVv{(Mq20y`4+!U!E+(2Fi&C`)lcj1!aU z(f6&FT}`Xq-qcH7UXAC7nJudF=DaMAJmF5oTg#;zi;S6q*WuJ~lzyKfsD*!KdhAF} zO(*M#8w3Y()R#aBA3qGmaMTYK>paFn{*YdXN@+hqA(j*UJWwo-%5eU z7kKccj9yhnCZQO8U_}kuWOZm;B3c6}Xk<6Se$LjL0Ir^0qI*P;w9wLQL)a@}QgyFK zzz!cDby~{F%r1za3U%)01oARDqBR{Ad1Zlb_r{}rD zA(dS!hs{IAd;Z3WY^)=@$K6{_B&4poiXsjfW2?F4zIlNBxZ~;IEWce`dVR#<|AAuYJ8u4ekHOC z(15o)?@7^?I^D24*2d>`19@L)P35xh?9z(rw?&1K>fN;JK?W+tlegzNV9he+NIv~^ z0lLI*Nq6$MNTrQdE>vN|=zucX91kaRLPp`ab0u6vEA5VV>BqJXtmqglj96U#0D?N6 zcFT-e9-NRS1SqJp4QZl9xfAd*=R$#`o24=ppE}+@je6Vu41-#6Gs8ljAF8cz`r0Ze zaFO(v^LWnYlYP&7%{ONHNXuz>z|C2pIQlp%1zvt1(#27Qg9>5gk83BZKPF{O{j!v- z#NuoS37*!@nP`Aua_CIhojZZE)^S2Cp5CIHYdA#iMoag;QuKa&-(j75fu(voKqP4E z>mMT#tmSBUmU-?g+?H^DUaJ`SgPne6z|^?J3|mbOul?U9p^ljU68`UBcsp$J{S ziqm92^xUey>Xfgo$>ZXlOd^y;>(1?!dh6&?9OxGTZf}pMzh?aB@!HAvz~h^40qkES z*z>^au)rsW4`qZ&Ap+>wRER_+-k3N?1B6M_>*5xLZpx1tjT!R_MpQqennKWEc&Y{a0k)s79mzRUkrp4?xeI*gT`~0V*xGcY+3b9JqC4iyK%k_p^-sUMy%Yps zNVQ$e?>t>^kc(o;=ODbR6GX7Y&XbD^UC1lczOb5%=)@<{tL1zC_>G_UO1j#Iq!=?N z%NW+rI=!5v>B7H#_pgUku6yvZvaYZHPP^Y)UtezvTUc0_^}k%Ms4LYAPt1p*51wLj zBA~Z=Vtj35NJ%5qo?3dR2QOJJ*Y*>OIF;B(1TJ3x~7?V&nZNJcXf zYIEna-=i%85tN?EA}x^{+F69(THPCL=alscI6s9+!&(n7x5j>TbtC+rI5cMWC)Aor zJc%&^{)YXOV@7s9PbStBX1h<-bqz;XJ_~#MAtIIXw#-TT(Y@>8KbF^i_y$pY0~Aw@ zFDG~_K8FOS6>Qm9BoJ#KzpDt`c-Zw~jAo}U_#o-Ezx!pTTrD9P`2)hzp}w{+hd`31 z_P#5{B!(s%gY(w6*^>0%y&JH}_P&_DW}QHc8h@4PJE3dfb9BHg#l$6mVK2_FUe9V0AgN0J@4aGxs{ zv5_AuT5;qE`!u(bU?4rvVR=68>`7xDVqiAza?nHt+|AdwS6u0F+-{BSjQeQyeXDI+ zb2dC*X}{eQZN2_|u!yT}Il;c)lf+WL)*8E_Pg6D7KCP;BGH~#)=QBCJ7O@$>CB;at zNXyTNh(BAsXm|Q8s~w}&5eYPKU_n`DiHRqZS@{FkGY2brbfhhltBySD z!00J=sAhGxyF2i4>V8lPp2F|?bOKUgDl!uoRui4B+K@mJ;Bt&p1rEx{$WXoTu+}=v zZ;5q1^~y!%LafmusX?Pug1H|1-=wh=bP75d*_Bo$n_3E+UbTvZo>$IsNswQ1>QVDME&aXb8|aN5245( zR1A%+)7`n(UI;Ixmpt9oE?~XF6ySW7mG-#*s&(7Fg{2rh_cawx2v<>-i2s3{?1BQ< 
z)%mq(R(du93fzdJD7nx1S(9B+tg-O&6I-172e8=ia>KS~h4I(L@%*tP{9*DA<{*`y zLivmPu8)r`9kI-nAkb2LnOrDGA?bE1c&z4}#gP&K@(D*v`7Iq%U?8@Bu z%G~c?<@cFlAh`99@@QC9|B(vvQMBCI<{m~ZRj`xAQ9*Iz!pz5OWn;CHQkJ|RrqnN<5AIuKaw>@Wo@_cB zf*YyotaS9$DMEgJV@*Euun!&(CSmyI)d&o?jM zl@yZrj2*i(ajY(Wn|y^r_TGUC1t4b`8D)_RV9Q4z7++jmU!9$t+IR$1W@BaPYD%n? z$FrOI3;u!H5ig|PUI6n-m_IZ6Qq|_{?3ermMp6o}rE?PlobP{64j`WRl(poiZ;d zgtV=g!YpxDKpSu8yz!i-gC8_7c)4XN;xj)n=~Y(r*fpRhl#Lk5u2;5B^B)HzBC>@p z+)qj~Kf`Cvzqwh>;VHE&8H{l5ddgdB!Dz`kyjPOeKZ=GRKs|9RXK~Skgmzb8aBQR&Ag|roe7tPKBSB-XchINJ#+>vBaEx zgyol=Mo5@c6}NaFe1KEuzP!!kZm$)+;-tq%$xcd2wnA$*bh2U;B;$zq;q4%npwb~I zyZDRqNF`og>yg=?3O~4rrM!u4!*vyyppv;kyyYdI#P=66vQU>tvU-SQUko~1(UUP6 z>cvn*(Qc;ze^=n}Fzi0J=Qtx}Q0_xFm-fMDnQu^osR?`L)!V%YWxd|p-uC`fGS$70 zo8BIMg=tV4FX&#@;rN;5Kbp)Fa?5KaV@GIomOEwL5892?)#AAV0GLb-y`nVYy;v*W zo!=XB)-D*`2=BvHqqupbrkdgTT)X7;=YBp54>Ji7Aycp5|05v{ zo~_lwM-*(Cr}!4dF#@+`7ap3x@bfGw#fPbC79^zot3s|D}U4eOE} ziTpH&3s^o2cEDOTS&zNKJ+@2AFCiTf>>dmppVcPZJ5#f`_EFd@i3(+uZNcELA6BK# zB-FxQH)ORZmoX8f)cj7?oyBG^qz9_?{K}f#!hualZWCM#p7K8m<~)0FGZ7!)4knrR9^OuE`HYSMdemsOCOJF3CSpBGb73V zO85nfIjgmo=$!M2W{`8EX)DjTEzv+rDiG=hz2B!taWG2GmD987A}db0EwP7%TFQ@O zdVnNM65a2Gi6Km=$MHHt3=^&-AV6%)a0Gv)hepCO_o;g+}G;yQq-R{-~yllW5#jlq}`-_4OsR@U4 z|D%;{MKs-oTOLe_P?wpi0m!)lLx8u}?4b_9=?hcO*4943y{$&E=4L6{!;O~^F{LJf zB6BUoGZUS-)aBRmh+6E^AYy*;vhNphdRH;*+?g=PoWEGv2-k35O8s%FkW&-Vt(e?L%S8go!|j;mRzsJyXrld zCCY0-#&~vKT@37nK>gwYLTIDm-Kwo`T40Pg!I59zr$i!Vo9U= z8A`|9=aS@k-Chd~|03cEtpHKw)Ti5rY~@&)M!+xm7x6Pc3bBt~TzmlaPcORhs_9VJ zRoD2x=lp6uyq`5neCXgHjysTKJ;zvYr-hRK-ra3vsLw6oLJ(T6uw-dEg+75G@d`uf$Qou3U4!CHT(_mdoMRzjsjrnmwD zK^mdf0&1%sFn0<&az5A)+uHs}rakvEW66c2fUf+cTC2uCvt|2paL?=OEw9bM!*MIV zju!p@&6M~+_AXLEag&|MPzx(M*vJ7d+|S3ktfRKGj1CG_5##Dw|G_Am=&Pp(qLRmm z*;l}}bEP?sE3T$xY$YJi%CY`YChA>eYvN!mT8ECC#NiD)Ld#n=#N53h3iIc5n_ts zScHMT1`T@OHl7*rwlTB3l+XuVUBaE@SU_*|LO8&mtk9W}Yg<+$vb1{cW@h3n@b}k2 zRSg;?%&wmZCtle?&AMplninOIThO`j#YtYxKv}+ThHx`QwOd=!;MUBs$sq$o5XLKx z7vKLECt`4|Rn@)mWpcLnyjSjbS-UwHfvRg^#?VMeu<6@)8E(Oxl^su=uHa}A?p{(} 
zsI~@G>YH7(xd zkX0u&Z*sOg5zaz8CXiATbr)8$bXw3jUqPRM6ETuj_&%NG>Q^IoXoYJzFF&vG-u;IM zIK)91OCm5dx^d3}p0<~(<>KkZ&RmZs`OF+6{@M9D(;kb+Z&@?pbe0etvVe#C zejq~4N*JD>gP}u=(<18^hp-c~oTL1^+EF!ExDmiEx?oc=qI`9wD@UmSG;1b5X%M|jwnA-4d%@Z~oB;`a`v15?Q9^#%c47W+D@sr3j#r|@r7}AAN zT>F;FGY^~c*vg^0;%;;S0KTrN@#NJb5O(jeBbY~|Ez@)36~tpP90x`#Y;hu_*(ik0 zuNyE@&3lk-cb+RsQ8_;C=VW;b2w7UpuN#R5%&)2}EYClRz1%+BphQ@{tH`oWdGK#Y z-9{7v$4-FT`~y$cPW^?GNnjpQlnWVD1~m4cZ189|JvoCr`Y& zg+)c%g3O|2f9l*S8*i_=8i>)11=5(G)&`l3`chUjjjH=E~FTRn&FzTJU6q1 zaADxYZ$7r2BmpmG2`wI)i5YD|&HE}*NDjHPyrHyiom!akc=i%w0+nyi%?V9}6E4hC z35X*WCFwXte&$YmbnJTBib#jJ@y_|5K%EUr6R)m}+Fz`F{hy|)vHPNiR>*uYskTx& zU$*^0pM51^{a@d)B}jauRf{OG!22|R_S5ClsNX7(Naq`nkgls5lU-s>a&(!P>-y9I zCxQ7{X^t{Mkwfm4#C(Pkv>yZ(S$cq6jlwyjZSKD2qFnXAoZsdWts(5NlVJM9u;)%I z9Q|4B=OHW1*gw#75;WIxIU1mBW*UlFqTh!7JbD-NQU+)hfM1+Icj*6{=Q-)q>jb=&BExS{(m=F zzx1!Cr~8ja0$OG*b(NLcX=U0^3&S5Y-2Hh-m`9Z4O%}mldkem;?UsR%+-71+rz>i` zm@wdT^Fqpd%D%A`p=IOptINS?XWrp{C-2{^xR7#(~FygQ3ic3c@`BV2~Y4xX7J z#@dWXMzg$MhBZyMa0(c7pG#;T4zZmk@U@7 z&81Q_wf`ak8yL!(&o$nYz^(U=l$RgENw|G0>5f(Xn3&x=S6uW>-eM0gZcdLfe&4BmVqinq`xPtlnqnT>feSV%P!LTj6Q8X^ zc&+C%gZfsL=|QV|90;2itT~A91jE^4wgB9OCveG!{X_e`a)0HD5C|QgK3ORGPZH7{qiYo!+oU{*TnJz*Wk0z z+C5_Jd)H7Fc~u)DaHxtk2X8$cS`b*Q-CeS$wQ3gIT-r+P{h$9X5aKE$bQ*{Sv}4y^ zH}Q1@i>jf^?}yVAR<`X?vk4>$>behn$TCoV787bUI42>B414iUrkH@1D-wbjSh zyLOTPS^_{*`vSHkY{lBTW4WpNJ~lL?9&6tf>pjbzBxwWRCV`MbPfRX)$F@Z*I#+zO z)A%gHoDJ^-VA(bkWCWJj7um##yfW;m4G-?v zYtG_mn_WII8)0x}j)A^%3EaB72EJwdwGVV;JVIGBje4lqZo#qm-5dRAp)TVDQ%?#f zSL7qs5dQC%fsY&RP2V{Ve!>5ptX{W!`Q^*{9_j5SS*w|~OTb3|ELntJY3+7ioB2}O z^F4F2^G)tuyj|Wvt|dQQj-wm{Ja^~W7ZvJIXG1&<{XMi4_C<3veu<(vubdCq zJqtDk|3>Nqm_aaNEhFmfG9u9awQFCErKJ`6NCAuvwza=6r_axIXZPhF6_uS(`8OXN z6l|OH@cw%-2t9e<9LwM!hq7`XIyYZAG12#gsO$dUOMvh-f2fHYX(?$3Bw@EuR~Hfi ze(BWsI`Q8d4HQdo{at1b;ya*0n`8bz_#(I_E|TH@^Eo}92vkA;JzstmKLxMrks~~n z7JakBd~OCiA`2m4bQEIy<@+c6#&QX|Th(2=6#n;?1&h7D#_M*Ps225Z7{YL54RdTi zhYjwROSKrdkwj8_3`8f|^{~0=A<=b)YxYl&dhdifK*ay!6*9Yg8Mx_0u)B9*-QnSt 
z(}dQ)R}e_zf!jUlF)naK$6L9;ur}GzUn(fgEhKi97ZGn(g2tc$bx4>%gfB`dQIB(e zwtRmlruIfB4K$A>$+s^G8-ssKbmwmf2b??#WBt<|znNo&>ev0|*WxkwQU0Qm-(e28 z6jHYJ>ZoE=iN|un?UkPH?brOS3F+!TNyr>&{d52QLhSIm<1ZV4{>KkB3%ee~ba5rB zvmV^rYo=lc-ul(iUxEmW99LTsDUrnWh}mnT-De)t+DiG0!Q*l1@swD1Pk22^QfXQX zFI{4ONsyhl1JtcHtxs3+{I^g!d7tI}oleLy_Wje^c!gE}^1`S?axFT6$E<)S*M3v3zK)u18VL{VbBhe>D!h;OVk0Swg zC~(eJ9LZer(l6)9q&~v%7!#3ey!lW1%Tu$bd}4lc+ezPt-PlS=0I(y(f5!E}^{Xod z8@rI}r3zB#0%sy~>M4qp<=P}~U5-INLmYfc z>8Ng~r4w-Pq|B1o%EkTt;NDZeTwo3FX@%U;P!uR1fK#OAE z2cIo#LHOoE;Snz6D*|suZ?viU$q~AVg=VL32^ZM2bDr|VUgoApm}qYG?u!3u_&!|| zBSj!WP%+Y4UYVZ#Y;@w%;-+R9dy->^W~@20lcvevR7^cVkKqd9-z*3cTfbrdNG+hT zoZ#vxb_9-lbcx`Xdw%RT?izG`xSIrw@|rW4`eVk!=SgZ+X@l)uuzS62rTI6${pY>x{Ctj(!9~YBvb~A80eIMXeNXOiLw|NvB%f8S zstgFwnDLN2Jf$}^wf`2726eb^n}GVF!f1K2`|HL!!%I!&dFzT5UMaL0!awPkv*?RO^LFot;XkGUX(=yg#aKP~ymF%borlOgQdWn{q-alVQ1+uf9UV1CvXL`mNn>X6_#6w7y9;F6~Y&)R7wvA)Y7y$*iAsn3U{ zHU~GLM?)>~jf5^pC5?6%n;mFbgZ8taA|lAAWRz4g;8CThYRm|j{fYm%o$XHbDGkXC zsv3Mfj_}9%tMQib=}B{H5PGjhQ;FRXm7LvfHrjiSHUp4EABVeQy#)yVFUU@73$;Vj zvh8b@R2tfV?=sY<-uR1bl0N!}KSk<%SQbPwNsUBg2thIOQyf%14(S-CMLM?55ygAr z?qF?<(6^ENKWTrI=tam2&HWfXQF{yVQkv;Hmjn0EO5{AHte^}w{``fM1#m&$39nWk z!qZs#AL-Tk6x7y?86EgyMz?!G*5Srhg4AAv8n$sYq;RIb#ik&*Vud+wB^~GUKdX^T z&zbH&`OrD)PI#D+XaZ@A!>Y3S<~GWsbE=;2~z1T$9B&i_YS;~y1V=zy}ix*TM9+uc#J=$X?{OzQ;x_}RKUNA zr7hQh9hH5}o&TgUN2_!0qfgc?KX{KSxco{gGT(~jYNJEC-%V5c(M$OF!asu4wzpw! zJN7K~YVY#zIjpB3r@R0r3n9aFYr?s7!;Dj2@ey=uu78@{s9q{Z$DGn-IrDP+dwh}j z-2ir`$ddTAP-I2%Dz%_ah2xuAL-E{ReN@?0=3Qku0qGk(i_bvwVzgR^fxCvr#)ymD zc>i3(M%f~2U-p>Gmk`8mhCw2*h{vzC;)2F<&SZ_!9YV2|m!bwX=-SM)o!|XbOw{)v zuiXRhyg^CED<3GGDSX!rxPlO~@0YACr?dRr_+OHZSmdWpN<)5gPI^^}p(Jj8 z!811#A5>yYpqS#m^>p!F1dtH!M{zTKa1So+n=&JTf4+LMj319;!MgPDqDbPaS2DL|F|f0?KLbZjK$DRfC? 
zVW~r#XJx~8aXnO_FZ=hj>C{{!kA?{!;=WAYK&~)SRF`+7r(1w&z=4>Km4{m6%1Lo9 zDi?BpFp?Gh%_1Wv@oG1DCP8|D*c77bHim}|XcycxxyAw?_AG!b*37;YtPLB5(-Gav z`Sr=6OP1eeF*^Kk>T`z&y@NRJ^Pv)RsoO+7R@4H#O&t8Z_&Ny!E=#)}PwRh)(F$)I z#k37|200m-j3Hi=yWUSw#wO}Ozn3#;FK1nh#j=sjHSrPK$d^Owi;s`ZisP5(W(w_!2V({TP#WgcKb(Yk_(fN9K>?ERkSW+%12)TeF0f-qwf z34+6lKKc)u=6A7$%n>@7&z(5@cKPWecvmb#`E(^w^%VkqQMk@4^f^s+Gj4wmYhtJ8 zX(epwyi1R!z?aAUw0kGv%y_9yq~y-(#^>YfaHs8)uIBU^_ub(nF1AtCK+iVo>38)P zHMv6j?rmBC>h}Gzk*8^H;d#bUlqfT+mSPahFEcPQ8eDbd-jWl}@f#rU%SR>g&EdOS zq#|R6{$T67^`^I2Yo?~ZlxnfN^YU;G5WK4Z6$1S8JMt=WMXavJ^-tWX!A@!xl&I~U z<$t9dwODf7ohnZT7ue+R;%Qakf+X$7tKu3GJ`M7$3`q~`Zl-qx9IFQ^qxLXlDbx)7 z4M_0KFWtP|h|lYIn0m78`xD^QwBtY|?IXC_>{MRXNN1c$OD9`YQ{PcjU)WiGfYWqb z)~JJB{uMUp|5kphO5Sqfit=k@jnrn&Sw(5WoMx(J-9ctyMDjkgZ{zH{nN4LG_Prk+ zPi>Ve4Kbg(mcZAaNE_;GmcHTMROL&$O3^Ho`*sUuQE{Q0PkEW}P}Gz&*xHthL8q>lCmh%Od2`kb zYuQz6(@dm!ORpzbA5W)s(2}prdN(~N_Ze)fFIK|L)` zM@xY}cGFBM+HSZiPSuc2lYloe(z12`*#E+FLm>|5BL9gxl*Fdo&w+0{=j{-Y)=#Lq znz^TKcxb*9=O({<3`S7)PIuz*!UWh3q&1jSCc|AablzPM-X8f--jPYnN7kfj;$Qas_r6gR`U$CnT}W9)Z9R$I)Na3Lt;z|9(!}v z5dj>nG)kD2J+{_MpD!hmN7$X45-ZNF?{YSwRv~l6VSgxADDK=W@ZjyDV6#hU0(mb# z{rl@+xNQ4#C?C!edh=3TLsYv32Aa!-kzVO)>u@Q|==>P-V6N%kjB+cf>g3(JSzHc; z63l)>yfyPXzf3${z1|sc+P>^nT?Ii~{KJM2HlF`En%{>1QAZkm{zPUV8fHE8N?*1XvX+4ZrcjxwHBW;mi6bO=S@-^RM&D8 zDytvWJ{QxA0HQsQH#~777JmK}BjH1+wvx!|LSV^@ieS3$%XP=DsT$qxhRPzKL2D_g z;{*?Lev;pmetFgk>?Tr8_TZj>nL7kpSn$MNPV3bLV~*~}VqF_(dKYzNKmghQ0?P}4 z2#CYAJ8xV@Ng9OLHuxly+e6wZ7P8YJsSk+2(hmej8EDLbJ}oGd3(W+|EMOWm6%WbF zf~2EOB>I(1z;%-0ehI4A#S??Sv^UQ^9e^UfdOy-sEC^#%2os+EkrmTV2-TcC>d1mDrDn%--3! 
zZuG3qMTJ}Of_V|dy$aX}!;l!~q;=jmn=;|813>pQ(;R1G615UWN zr8YUWapHKPR=lOzbRi099N)}Gnmc}FNEKaK{e7PNlbt?cuy+CmmWUQsXjb|u!oOQ& zq1^}~bZd5e5dK)+ieunSQ$T=fOum;UM_PG))!CR- z^33rHkhgFzJ5>t+A#-nMVk45(ohCXKgP&g6o{@Vu9;c_YW8@>^@$R&}s*_D0_=Vet z-RNot-;V9LEb3#cj1U#4z=Kp5^*M8q*cKtdVGzl#Lh6G<8*I-;3mzCBpJQCCBH~Ac zpoI3w4))QPyST%ZDn-SHjm2UR^$+zE$w(s9DtDfVLCb*aWt?a{b5;^^upWatB7y>6 z{H4c@d!uynu#w;G{_Sk#>p*ON<=?MgF0*+Osne$eoX~MU?0?~B;`Lv$>L>MU=1CE0js6N?Q0f0{Q z0k2U_8P3$Ycy8-fY9v6yL8rR!#)*Ya)v0%3K*hlr_vyks4{Jtzvdp#tYXG}}cTO|C zra!r>)tR}I0J#oH;es!3t(pPDjfby1-1+B~x*$emM)egrOauyGH!_cpA9vcwO2<-7Iw9HM=T$ps@$LC`5CIcIg-L_KM~$3!B3~Y*sU3fn-P@=rOQYNwWi%UT&IDt~BGv;~C?@ zpyl2yV?nTD2OQYpruXzqgh zHhD^$DRtD0b}OVdI!vy`(d#K7UEfoqV4`hQ-Y3gxaZ zbjV7Z%mbpWPu(XspXD&VBKnzFd$XotYNa=YRv0Rb3zB-PFX)kwX4#Z)?vK?ETYqt+ z`49<>9z8tIfR0HwZSnk6TO3Oo$4i6CEoE{wR-lQ4-1>gvpnhBsS%>D+6?W4yyL8S{ zjy(jyQ<4E|cyn7@LvdNPg@-dSe;2Y)jpsiar=Gkd^75mt+T`uGk2VQmSCi7#A33zO zGz#|b0b{uQ1j+{)tiqC#*7`EGR7il|Zep$s!r|R?KxXa6Sy|$Ki>U}$qhQ^KJ2x+A zC4|V>>*bDNolG{V*Z^OEKwc_Ywn-1$2MTZ}x=bYe*kqBoAu7~JOU{KzR1B|T(Info z1?KM-=TuHKQ!@Kg_lrU5k!nwwEe3$>>q0(pzNED}ZD6tGql`Y;Pu7Jb)hnD6(e)ft z*}tS7<1eYIN^iDh^y+C6r>4Tr7Lkh>AbQaS8D(~msNWXhpS4H)x*l`N$FQnU+%7c4upM5awp`xJ~<#T-V3+iEb!?X2MXm3YD-&&x(GB>l}O_RCn zSY?eSd1Zp;1qYP7lqgwz0?W3pHsZ_u&SGMIrZf#GE6sIt*=uqDxYu#$)I)kZ|vY?T&tL|@Q zla`&*Y!LgNL3WlqFE*BMVWE*{&R00sO)w6v76}`5lA!MJGA{fJUi$~F{6&c zaIN~QLS2LeEbRZQsjZ=wlSrnN^RDc}x;nsO`kD2yM7I;f0R;&k>EN4u`@C(lz=~G@ zLtU$Wzk15RrSMntoVNLBxTsEt-oG2KI4LEPHk_`s%X8HU}?P$37Es`Mizi|V(uN399 z(#R$j$CkrCQ~*br;pq`oGb@w(k+SD{Y3y+bJN(}wc-NnON&l*B9%UCb;4U8f9<{`N z>L?F@fRoBwi8R7=ld?DbR-=j3xCm{S3DVALp8}osW1WwY#KXL_4!tQgBZ6b=2b`{A z$^>!)tKIZj-exHfmi~t4R#4+p6g&6QXY0-&Hyw_tQbSKQy$Ap+uIS6{Y~FY~>Z)_Y z!3Dv|N&&re5~GKQ;npr0Cy9VauXK7!p*$NmZvl~iS#>@Y(SbglF1^q>X!6?1X}JyZ zMiv{nJ>@KF5qbA(E+HyI$;6HxL4DA~;paCYQ+zc!TzmT_YNUf~bLJl)I%>@}b{@~& zc!}>5mbW#b6^j{M8zx&-))`aktz{~{UYiIQ--kSWow0eB=8l{_b{kB7okARc*&R39 zQN*dOh)0iIY=_+Xa2As=xg7uLNzdp|>(ftqgXyI>LbppmwPbM-*Q%1H(@v4D_t~YK 
zKqmn{SV#5&95%DBsqNjUi#RU6J=OpB;$#+z|=h1>8KP5{G*3};-@$v4y z*|goI#MV$^uVRrl9V@GeKZ{S-wrVnEHL_~UX|hZa3{hrhvuyA^e$J$#Y7Y?s>+2w5dLA%`;Bq2ARn0dz~?YndU z5ZXQ3oa6jA$d4l9adbSWMzMqr82Kq|RaGapWp952bk5TEG}Go`@53YZw`K--+p(V? z?qU2k$9_Z{tq+eM1C<908V3gMms%FRLybF2@}2%~8aXCGi7Kj3qnpXMSuWlVHZP36c61T7o%vGihZ*`E`l%+?$5$AvA z%#(L!{LP`LIHO|y!UURGBpW;7I)ba07o?o6=?CY^EIVK{3CSS0Q-HoG&B2@Ag$rKO z#Z;4)TkI&_yX101Xl0q|?Q@6BE)r|npj6$>3!5G-DQ1GntYsK5%#=Cqkqqg?l{zv+ z(qJ<9qO5RPYiobn(u68<7d_QE;sPt-v!3)DLm}6?hH7Abrc0iiUW7vB%$)4b`)4Z+ z?(95gmh>pY_EzlKsee((cLqKHk{RWOIbf}vEpcs8*903GdJ77^7Z#cDB+glWs`509 z3{$r1-gRcIbJ)8|)xagNCEKlFCT%5FE}{+PqeOM@#(T;qGx zP&gzVjl#56WVZA=|AJ;3wAaAFz-CV=hS7I)QWS$KhPh&bzJwiMwJR&^0|Marm(e3* zcxC>fMAuk+(bvg4k&sQAVp11Jd#ujOe77QR2ih1ECqUPw~MIw{S(Jg)^>YRRwq zd;)FEv()I8O&tv+NTHY>9KIr~;#!ZmQi zfr`|fREewF*tgZxj{kwiI7g$Q>*Z$cARMi!xyY{mM=LybhQEQ6b*~?wg57snhQ}dZ z>vuO_Vf9+y`b4ko7Qx)eOo~5(XI}Cj9lY{dPeIQ1d+7`rpWFqRBvS5d(P5OLw7qqfWv08T);8gM%d2BUg%zTAvxHEi~w z^$sgszdYTDsSJ2LhL*Q>mc8b8qiatMp75Trr%juJArJHaY4LP9b2`MfUWER+^i!6X z)Kc4oNl&;#?ge%6!-Pq}V-|A~L?U3%y&jc$&a>T;Ma%;G`ar1i*5IHEpIp{An{~<+s_{I$p z{xBZYEg&{OJ<8EQ_4PKBil^opq=W)a$?6Ud{GRiz<# z*&DEbJG%WLu(|G0Vr>H`)dL`T0M!JVC4PSYY44tU@h-S#i18y{l$0suN;9$+{;eO;;fthlD7P1{k`Zm*rOv%PS-zmWHz6}~WdhU@C$t$Eu>wMh zvCip(ur%84d>cm!xk>|B?IGN>Fu;$9?NjviWYaduw9@TU# z*qj_~`r)374qM~zf<9X~WVz1)8Yc6_9Kp8sy3yqO&W-ukR!rIBTXk$7%b74FAZS#e zce{ZK9M0%}2ry&UE4C(I8xULeZqJr{iFal9Lw$gqMC03;BZj*-|-9b%a>*difPnSVMiXZ7tHb ziMo}0p$K(4`$b`*pt@hqEN}reTA%fErbWn8eD%wDzLjL3sS~^gOlv!s-4cF&Q#(2M z)RnK~c3o_gT-&w)N?j#d?u|h+H4?vEvBpfMvhggXs+>@?)`u zXJ^cx#wP-hv2ZR)whrjtugZO+Gk(4Z$`gg_Z0USzIxrIei1IBoaUCnS-d1!gUNN%0bA$Iqh+T1b`GQ zaZi&BI6`sx;sB6uIulyaqi?8m z+TqxKQ@E3NJ`^!vqWy}j1%Yqa_rEkp2HGIo?V%;8900+?wi0uJau_u}+@47X2h1Pl1PN;u*(nrGIK8!Zy{cxSbsj%D z_PvyMe{aQ>=~$wc&j%h^Wv)#ytk9wo(Qy((K1?3c5^)(cby)dHGa5q4jKfGaJ`jB>7;qtVoQsDMTOgd z%^@acG;Q2o$9$R0S6@zPh6PPZhMORP<}WY=b-=0;@)A_!UJcB0!%O)a*oStvq;6Ll zbY`UIT0B6>!kQG}N0N#vvdoF2zJ$`~d?(uG)R{4Z7NPL;%m=xBDQ%F15SiY|xIBmr 
zknveZt-yLTM!RW4EV%?`?vI>8NQ4}yX*hM@yt;S4;sVmwI+F#jkp4fYwhb&bER|K) zzj(p?P%bVmM~TRzdyRQ|_0k}&go)BjlzPzur+3o`M{jzL->5AAe1!^9WMTQZIehOl z7bOWkVU<>x`F+iY(=adK?kg-N1 ziFoEhNe>ToNdU)g>KCd-~ew^CI=%`YJvMpm!&AM+U`zqIEdHm|Q8b2|p#zwxd zNGxhSD7W2Je!g9zO_1NvJHUkX?SQCQOGUlj+hn=r*FRlV5ZY8QNd~N)J+A_Axw<`G zfIu(kEVsIou?xt9t?oBQ?)G2qw_D-PMIGBPTSA~#5gM)F-Zx}MBT=0v@R^(`hVHd( ztyHaBOGs$qGH*J~Xx)qt$%l_Fl}s&FYXyJTfwCM&qqaX&)7oVEjVNLSY&f4bqZ+j9 z{%kf~iqamDl3d=^fcI*u&`{ftgBCVj+q~X46G|w~iMBjw&&0hpuy@q&(k8wH&f$`Z zSImTv|6%*nX)*3T`TR0)^FQQG1j1)RObl=b%FOO(9+Zj90d}I(`OeR~TUjwVt7L6c zU?yKA%bcrP_0FsT^ej{zzgNLUR6F)xQX1;K^ zJ3)4^?YUZyiE=;JBu9f5X86jQ^G*FGY11$A@N*xeo-=CX7i z%@=g2F)KNms*#}{-q29EA z2%UjX>s*$(Y5FO{V}GY>MDPAS5Kice@l38wktkaJFm5$)={~H+3`<)u>2H)pZG9E{ zzGx`q-D~`S7v^2%c9;7<0GD1~qM?hJL~EVRH5-aizX zov*YO4CYTBeO{>vNS|?&R^mw3)Che(XkdebIbZKXFG6m6uga5=(X!Ej7 z*wtt!A;X*c{Iu&Ccs{c2>MyNp(-)<{yY3qC_?1wND{jLkf%+s%@==4I7ZA?Zh_zjr zDlC!MJYQF7Y0baZQFI+v&73gWxnXS`r2bxShIRj|qbRkAwuR*3oN{71 ztXpCCRQ%YB?1Ws_8mVtI`2Jft7q;~G$`t#MZ8z-vWz`DqquRCbv^u_= zfD%zVAt}I)aeLI$Hvf7*rz9)(PULtgS`M-}KB%lkT_HnlS05WnWI3})ii5J&>GJ$o zyV^%y)e1f&oiKaOGkP*yz_PTak|oJA@H(kquE`U*TUvY#ILe~bmx{&?RUauF-MyXW z*{C&q?xJbbcaq(ESjbuiDdNpc57UwG_K=y`c8Heix^7J-M-mB;lETJ?L=#FI8h2{CUTkPc z-XUqFAe-p~@N=I}!6H;3TJ7moH6+xjep^)usq)0cd0G2n(Bn$OfK{}>id zrQ!Mx?!TLzH|z7HZVMP_xre&G&aj8^nAa$=m5X4S3{@R#(qi>=&X^eh zPa4!6xwnjlz~_yoj#?`Zlmy(JjbHr$l7E*UZJ8uKl4BO-cunGl3ZmaMF{KLc8zICBtp&klJqpJ zzF%obRB%*%925z$q#pJhgR`Pd4^Ea9G{YVRZ!_k&^*BGbf$_ybSt%>NqN+r%tEA=$ z3g71~yuS1qK@f%X40=)m>L#eV7I=6EWw5la90f=F1OTH$ojFq}$nv-q4xHN_DVBHY zQZ!xpC^1DTz_aqbLjp*JTo<)wB`oV#KKl4y;o!b_i(CT_G~jl+NP=iX;|r`RYGR6~ z6Qt?f3?{UGS&Y##yYr-G6^P|!$QE(k(W2*^J)KpIs(-Nu5C1)xKmhNCUBd)8aBre`#W^ASAgdI3N7_;idj&P0WSHI(OYZAv5M1aan`Ofr5=<+Lz&Kn-!rt z%l9!i{U*-c8w5A$P#jPG^&p{jY@zxEt{I`3hMAd_V)t4Zf(8rDhWSM5U(8%W&Aw(u zN}M{CHku0;*1SDqmrlkTI*~Qmi>9uP{lzrVBOv4%4#z7jZ{)lS_JY`WN4Q6utrNs8p{rM|JfQ$cVwZR$quMu2y(I;-YaPIM_S*k zaTsG~MOjh#)u5$=xU(*{=2>SeMVekzU9;EdT5v>JDdKd z$#l 
zq6)88Tw2cSja0m)hNg>Q8LG&>sP8F+gkcCig6Vp)=Aq`VKP^mr@=#vBR?^d$4`hKD z#nVU`FNf3KjF9=JW6gu3XrKk9Z! zUtaDK;wkrT=awlATLRLkQ0Yy-e}Ndi`wB#I zRZt|#jW=}|m$7N(E*x=tO{G@Qkq{kunAdKnf$E3cM`wkFSvTIMq+z)mO|zd4ori|m z=9|`_k26eiCU9;7sSxwJWB<{3ia*$& zbrf~VV5hg|oUe`pxfu*du{S$;dYR3d%Ix8x*`*xsjU$*p{%>U0T09o~ZDN#3w!05H zMz=l!E{9<=FE? zjEu;lwG%dG5j1{5;@P6%QMXU~EQ-AA@a1A&Ked9~*0wCKJ8pL}vL7`{QxM&U@KNkdIqun@ z7AL+at=89O6mYe%O{&Z*{)OJQ z>)Eis2IjCp9=MGWw?AR-6>a&SUYctg`GjK%u{?jt7SEaI73Cb3m+eMr=sq`vw@*Ug z^Bvb&tEY!x+!g6bgTeVjX-eg%6oczpwR#RYu)_7&@H@vG(cH1^y;kDV^%{jELdmW<&V2vcdq@}DaV5tvYPCZ>>IGSVM2+6EKbq-(}WZM_RdiU zW&6&h_1V0$j?spr6eo%X%j+RlN^c|kr>R_z&a<$D0LcqIJK^6-_RR|xi*6*(C*+F` zyKlrl>xkokMgFX)>w(A29I!B7Q>|O4(EM@B4KsSN=liq&C9I+s1h+vCuJzKEYh75^hT_5KoG+w*y|VJ#is z8;lRZS)C~|uoSWDd)%$;(>UIvrqk-D&jIX@uO3+At^)7wTk@;>zkeS6wc6!nZ)}bH zW;>g|*m74)>VKd4yYX)B1VZVV)X%IuDBJFCY;4iJa zMUJn>Rs6g@J9n;-4_c|)QZ@kl_&m86xBA>0bSHMb|DT{dOURBu_nvalmf=<5AY;Qj zpn@a6;({X#6c+r!KC=45sV`*I{z71rL;i&tcJt8A_M>X?57Xx$_abf&l_0vgM>Fc1Lyr>)X z>whbrK>mo0d5A|5i-VauyXhbTiLI`Z6w$tqA^(P{`a9ZVcL!YJH!23fnY&yG3B76@ zSJ%KnAuh~uopEWl-v|a`3I3BDn#+@&T)v*;^jYKcs=C%|EF_l_@A|;WpOs+4wZzLPFvs!$eQ1untc+xg(zk;csYP1 zL@4L&_~`@#K)tLI^UMcXO#zvx$Lyb5n!nfokINF!;6DnflwsjS2p|@GgBH1k=)s6;AGf#($lhSY`zzKtFFkji&u>X zGDQA!$0sQbM1fkVZTGu}JF#snP>x}yT*?mA$Ubj5F-6(K9h?66Asv>WGVM+dpPv%lLsH$eM zj8z&RjLZ4J_@r5b^3VUqUFu0Q|5j8~0qzusYpnRo0)qe8!pE(zNp@&s9q=?()|vRe zj~g*-zNMzDv#!qZ|1tKKVNq{k_b@0*h=52pNVjx|Afa@RG)RebcXxxdbj|=ncegYQ z(%s$N4E%rRImhRHKECsj>tbr(bML*^TK8HG2djVB|19ER{pVp!8PF=1`rO7BUl+W< z)8*#b9c1b4aJx6*@i3JA9I#h}=mXllxhoSjs}BT(wj{xf>d%P&e(WlPUIb=e$x2IP z_ERQ9C7-LHyNp`T*pTi7q;1jncB>)sD|TRqJL7;1z;oVa*|oHtg?=@d8yjxf=&djQVrf3vBs47RC2V@xZ}U`zsp zJgk@>8zSaRl6L#6?f$`VZ4@X}yARnDUgvg?^d?wvjw+@c$3TYA^xs7S0C||~OR9$edTvdq@9dl+z`?oB*IZ9egb^HywY)R?x9&uvO_LCs zY3@tm;!s&7kOYdUR@B9m%$YY;UNuSY@kb0(qK>1##>Ao{LFv!geTdEV?J1e8wYj*N z2iaLbN;#wKZ=TV!pgTqdb|JKbhc+V8&3^n{MA7BSVMKMekn3RVpV$v<=gZgcQ>61% zW{V+3q>+(fP`lnYQSbQa5bm`!>Hj#;#&M$i$^4Bbz;feZFDM>Ns$FZYF4$=hL)J7i 
zPu(*EehLmc2?6oqIlwSDY2<5~(Y}k$XIRnE3igdg#QAS}96Nty{Mv6fMktQd%IJG6w z{~oRmvvd9-u9P6g!U9OyftQPwL=C`J2dus^(S->J-(9@Ht^F^31SQ-0b^D{rt+zBG zI+EGIV6*6m*Dd2o8(U~zyzy&8H!pop9VXLxV`Jk4V{pYc_JVzlalnPHDjawWW*t>_ zvj1lVOnG6ch3|tJBZ}R-=&Jht@ow*Cbl(ssZrs=)apoCO{ubSIaKl~yJUI??*q7%NsI1eaZhtif1*HbsoB0*U0>^droxz>zSU@mMHBA6kr_PdbD8CRi7i9jJ}NkocUIDn^H4P zkgF>*(N`Q}hM0&lYP+Bc_uqF>vC4P8Ck(pjsrsmetV|`$OblqjI6ca<79RY7+>|0O zE8Z%sMma>0LQzdaR~ZKt`8mWme=K>lAyAbgU7?n^$qZ4%rJ*i|Z%SRKAN=c=4p2~` z?r$8-+e+Jn5LnovCcnlf^n}UIN3oSf3=e5V`A75oS7pnl#LvphEj!hviVIBi>}7!d z2{nCp4ZD@;w{Ws2f9JE21F2j)=y;9`ra?0lmu5AFaamt{Y~}^FpmR&TVcyTu8e~iY zQ;Qk>TUHmZw`BKY(o373Q*^F6f)bt{d&?yJ-4rnk(B1{`iEhZ+d$?WshfnvKY!~F* zyq}DQw7J~g<_cLdf}S^iMhuCESeE6DCmmmSx!z}33dUE67CY*#@zGS|w0Xe6chE3J zkvQd)L?JGIcv+^lWS(+hdYPs5EGWTPSkSpP8^V}H-&ksYghC0xF5d9RZKlGBJNVyZ z66ZlpgqNa#KwU6}Xr7q(UAE6KDMQE+pOzu>i%!Lj%C(uT*vG4j>RTrn|6W0A>VI>>l~HmurbbZ>1f zC}kisL-6hE;3fD2w%%gXJsE#u2rV+&i7aMhN__MFBh6 zB3lK&`SD>3(%9y4*xeWw)}4@0(Vx62`&GXhTFgg;hV1A_Ly78+b{P8KT6^Q%A}|O3 zaexEpvKixmJ;X8JS`o`Nt0wSO1;YlS4+U^y=t@6awXretTJ?XGG~S;_{MMIj_vnM8 z%~L3R(OzayrK(C6)Ex&A*PvgF&x|}EOH-aHM82D6-ZFiq;>zwJn<0oQE8EA-0=9DKwf->9AJUorNF<}`uY;B%u zrL8^)9yYfr>nDW`FF4Iz?A@f;vAr&bPOE}}xTA?hL7w;Q?45pylmS0upShm)btK-U z6gJ!lH{YLLY;aNL6&9|J=y_HE-VTAe$jgsw*zB}w+i)|qd*|(pvXX)w^2~=W{3P0K ze4{Cf_?O{1+?!x4&81R}|GuJ~CId?WrWaVER~V5GN9X66dJO_i<$d%C^N|LLJZuc1 zpoI7t2>hE3b-Hv>Hr6Yo0_egX`7fOa+d3&cuOL4!gI!a5fs8?X$nlZ#fIz2PoOwmm5j zHmBE-%ClN_udj~Hi(tt9iH*wCv2yZfNA21T7Y(!t&i-cOxphEJKuVU(ZW;W7i-ts* zDm%@x&3SiEnu-u3$ZT-1nVAVu#EvZTbK@1aX6-p(8azI3FU>dftfMfn+kwY$Ufx+I zjtnJa@^o^n60^ESH7QA+jk)=9bo*@_XFLZtk&AtDWWhn3JFfX$fPmo_QR%_I);t-3~>HZ@;EG&(I3cwzaps#kbZ5BSDiO8XDiM zrVd>uIwimyKR=q}z|5-twA}lf#~ye+7|6i$Z~Vy3yUZ(KTV<_?R?&)`km_am1whh- z(qGqrY7&e^e>U8#^$i`=+`(fMc3|Hse1~S4pUzfRUp5U z60NT=mv+fxbh|e*n{61e(QIM4tL5*Es7W49U**dVLPSts3|0bHJQkA@ovjRPVK7c} z2vZB;L;zL~0Z5%Hy&~`%f<%ns6cgiC?>C6~Z(_A0M=|$hB^;|+({XH3asN+ijt%#8 zZBRGJbO#J?~KL7rIBT0gh9 
z<;JPDOnuJ&nb0s`jP9uazcX95iTFnZe|#VOx2Re1@AqbEQ|sIBt3s!H3rwMeSGplo-xWv-9;Dyv?)CHQ7f)*<_TP6{S&e_kz9Ow{Qc~7nVJ% zt4f-wuK>g_3M=rCW*g|Ddd+8@zO}Rn0_1wiIDB*CuGK{?xfMrsnYxmBRpk}m6R0bJ zaW^0s%#Pn50q~rH1$Oz0GqRv^eNA#cGWXy+yACpbBMOY9$%N%-^NA$KE2~XLLEPGw z^yLg#dwj5&58d%K21R|^=X-$)kb9^2hC@kp? zzk=Yhg!OcOLQt1C39%d(5pkJ>?lTVhjutimO%)r>?|f`*Z5l?RCrfP^3}h}&#V0Lq zwBR_TY8e~T7}H4XI@DSl`Ju4Z(?{aY*2YOgP-3!7qn|MT3t0J6WxCEGsavV;$ zFgA9d=Z7U_83JcLvvHi^GS1`|oNT+UpGb8-x^76S&0BnUJh~lxdu~gSR|;LDV@uM71KHF8s`KC;^PLkP+~SKc0%DgB8w!ia31&P$vHj; zcBSq?pVltzPd5{mz1-d>OZ#_zt2yBYtJ;uZy!3iJJ5QkLhG8(hloBNf+*I8nUjlMz z8@%E~e;dcAf3V%yTo2|?nm)cx4U%dxZA^9EWP8SJ407F7&~q(?6k8ZLJ+Af#!9mN&nqf!oPddYGv!X8@g$i~dhHT6wBm#fnW1(G? zI?f~+L*I{IN9i=W3qM)>jSFq9;_2;t_A}OMI(m16kehkDTowch&y5l1km8n;WMg`z zp-B>_To5Lm>C&Pt<%2*IkL7aRnXu&ELXAc3=k0XsW~#Jt`wltU*%3WLjwtbvR-w1B z1j;uy3Bab#BYF++tmpzpN+Vkncwt z!I+k1ZAn-h+(OsijQn09F2zsZtZ^Vpk?7)JC_Rf}*}_X24;2S&n(4zb(S66x)^hgF zu;c7&1seE-57pe>C3Otfg;lPG`tW+qdz}Yz%5f(kV1)-zWh-(d-LH&I0*l_+bFpu& ztpd7@!ash?$4i49EZ-*|{D3~4PwQ2aW(*lQ>g*R6|1d8whZf#UE3{QJBkxRW zKKu=yeGG9zZ;$oyVd3-I+X zKF0V+QgML@(=(2UQ}a3~*rlmAqwHIpS8g6*S3Ps;)CNPnpX@oEObk6GG38;b=m4Y* zRp!;}e%(QJ$5mchGj*~b&Q(}6Oe7d|Li zVL6gi3@_wB&AA)7-&-ZQee^Zk;2SbJpl(*Rm%p9rid20P_y*7+YOeh>7dG{K&W8utvIk@zMCsF z4AO7*K4q+c@3ttbfpT_5>*=u2cVzXmMOKCZx;09!#d~$&U_e@O955`)zxA!pvgkPnoNiN*j;72<-HX-i? 
znh1D-5YRpRkY|CAxLp^H$f<^A&>{mK1gI zOWg)5TgZb8+%sr&G9s{Vw|Cq!G{kEVyX1B4<3fQQ^vA zfsm-*sM2AP-u%CN;2p+n1M z`!EGPKJ&RmOF^w^CO2l?4V>5w*;i+yyn6aF*vb8y(qU9IqbECN*LJo_@z0FV$8FO@ z9e+rR%tV=LMvVR7tG<+NKGLpClT|`{crUO(J9FZ>`_&ndOXz zbjFKQ*|u1VsNl+}s3|kRZm0P|MRhUgNI`s*7$iQzUNxi* zph$6nB9Z(~doAexD+#x&0}`LmHl#5tc(TZ@!-tPTf?v>Ds^fWTx!#J(b^%5gt`nz* zXEM@nh$d7k=Nw-Z0apq{QflV>G4YMyW7?JgJd{0Ir5iIxzkRXRcz$RbWQX=`#CJP}}@~536bp!cOa_5>%=+h!7x z{+X@5NKNVrH+MomFku)rmQ%S#@W$M~PM_OPhEh!-3@dP$g;gMmnIUW^C2@EoS%4ET#=CyffQIy^b`4Q}!7}^LIIuNNpHK@bd-t@8ypiEpPEf za6VU{as7*?>y^CaAxRtXzV?ns^63ye>Ma@)D&n})1Flbt z&R|S(L(+mnF}lKtmm3@d#O-X}%fJ=|N=!7eYY0u|@BD@1$6gyRS!y(9Y%C!$inaQT zo+izIdY%O1C86THQ|#w!eQpx$PN9X4+V48!J=Q-gnSFq)9T5E9%PrMjJ6F`~Rw6yEN)?*ikJZ!6lOKR$Fl$CXGd@-fb+DyL=44sB z9)A`w0vq|AWX3c=YqCA;5VVuBp=~$1q)z-A`3q$0t_LCl3W!HDoHerh{*Om+X-Vr< zmCuoSVh&Ur#dKb%B(i;HY#?m6ZSg-g;$pFy6@Yt-xHLaA@FjwZ9eaHAKHkeRdtkiX+JoQ+z?9|TyaX&YA`!1O;m<$o}Bdihfa&H3>eaa7N6Su~Z z8?8Q&{L<5g5qySKI)F+FO5BcieD4PA=IzMPBQ=0yY(iW?C&%u=qHhc&1)8gKIDURA z_`FMG!?#Pw1oLoPbm@p3FG>uTwqNWHD@!acFWOcY z>AY~Oa%NAK9X(8W^WWvQ_3atr47rpR5!3zXET@RCOrbJu9{Ni^woE=+no*FYhu6{J z2%BQy-6Z%~%GFv-SE8Z2PA(NTxi@WV`Wx;Hi=7<0H)!5b?VHf z>&Sb4XF8oLlH57dh#TsJ7_S**``EK6?0(seA4PVsG#$^4j=(N^{^YrxTO7kSf$qLb zk)RAXqkM3_XipZqk1KST;tHjX`Ic2#tcu-`CJDb9mr0h5a(g!v8lM6o+qaruo~y2p z9U_A0YotIElW>B?8rM%=)g9@yJ(ZV#e&fPozYA-bgN})nlz5-sHN+tc`2J~xj{PKS zwyV27OGjP-FFz>DM#v2fwZOZ-M#qp8^=Y=8!jM~{gW&*psPcKk04%| zIyQOU^SRp5bxI;gJ?cjpy-)dJ1`P1%n=5L8lWRB6jV}CA)X@O$+Op?cUA}c6Z^zhW z*W?0++X6-tRPR=I#Hn$@x^pHsyTKJyFyrHs8S9S%!h+=aVU>XQj2cKaAu&pJ1YKTU z7#4~Mj;lKDZ+O7~hA^&5vJ;>n1QK5(GxyG_T(Fcg&sQG$Kmz@I*M8*D1KwKVjPC27 zmbEg|GW9g=o|n{tk;+MX*)0*NNzekse8+NfvPw082v2sm0pcb+(c>lhfG3?7NklYXdb2OH`0K(?IU3BCUysP}m}%F6b{lZO zm)?zYe&YB?Fi__GxE1~d13=L3j+gJ_kuECNawOUL-s)A#^4=*+*B$Aheaf3~D1lUq zOLEjT8xYcFuauxxzdknThJ?FO?_%2NGR323eYkC`k8Rp6-qW`bT z(1VG$S@7lS=}Dpjo~??smluQcrVo}BZDuMb;`B07Y%tJ@nAp}uVa%)zg4qu9JV*c% zEFSxfUR_7oLVapMT9hxUPH1K3KdRx6^IQ;>I)cc|`jwGe(Ufvm<#+ty8icLeyK(WB z7oHtLDB!Rh=2B|>q_LNNpi+%UHP_K 
za%zF%9O;tOa5RPgjW$E^^c>MfxKau%VLSWF{-Aiujr>69S@<7~2Jpw7T{qhBZ~yyx z3dAq}mjhpZMc^}tp!$5#{D#B)dVOfT@~UOp zQ=xO53PQ!cC&m$FUVP^w_X`H+CKPXXO5?{m)9rH&q1HP4Q+2`rkTjuWfR>-(q0IUL z?Icjd(*9M;LK+O+lP6zzfgd2KI?e^EAZ29^r$+-{+~1z+NE4zVMLAUe2wTf%l2iJQ zmvj^zVYr>4U?ZPL9lsTKGTts1j@9|B5keo)usY>bZ}}@gXsYkacP+}z(Z@kWxxK@9VcAK5GvxP=JN8-rJ(k~tU287%H2EqjHY z+H;+w{>7Z#49pRKDbW|vmle{N(X(P8KZoXQMQBr{6J}&2r=(stl}~w7A+rquUbI(1 ziD&A}c#+hxyMf{cKgOjGfL3kUsIA|TW0`M!O0g-I97pa@c~w+2T9 zh@}f%g23IlG(bYTh?2TMoQ`ZB;6sHGq2TJ-L40hFu*z5QW8<5qO$ zF1^aI?7FBWC_6h=it^>Fc_z>(<>p7Z6R-iBvAd0dOCz49n)5@2%=i>2&y*@ucD2^C zz6d_ikwM&o`Lx?I(swP0@SM^QD{w3`Q*eG({dzPZRqgolh^z9OLLS!BZBiTG!cq#1 z5Gl{nKJfi#208-|v#aHgZCnB$C7w6pbjqYUDeSjC-7Y^h=+9Gxc9F6+@vjYf2@yq-37QFe7?lmur1e65T5Koo8Vi$7kLN7&~us zzYM$za-*ci$G{iOhLS0IU+*Rz>orT`CX%}z9yWi#gKtLb?pv5S@YPw+6%+vmQ2*?M zcOMdlJ;}ZQBY5t5iZ2n8#Hkt~SJBbY3L~swWx}oqq-Vi%TB+vdQI3vAm9tI$T?oJq z5?`UFKxSQ9l93XA~-s}sHg>Bt9VDQ*ZN(62jN3Uu(S80k-l=pg=o~p z_Sl`?!0Re*I7618+{)0gWfOnGEdy++%~+rIoYmxou7$=TMyLKbRJ?D6@!IBr`xJ@q9ddVUN-(84q9J(An<0CE;{w>djL=j&f{2c)-F}fE9m>`O@ zOy6mFqz@ux$@EX)^Bymdl4!{0p^rbK80H26a{DoL6?jz=>~Zg!yfkmT{md|$wB!DB zFQA>3V`HY~;7=7C{kC(_arBR8R}`r%wFJH>Nzf@XV;eJgm**CVA7QZQO+${`KiFTCa#c^0mCYZo|0^RqMADd0Re@tQP*{ z;oG?V{ymtZ9DJWw&$6U!mpt|6Agt-UXK!Fyo>XXI)WH6pm)v9ZFzp-Czgmu+LiN|} z2=;@6#~!Yx4&M`9a9vO+`^D|_U2h{f^HcWCFrBNH$qD0%_`l3J8H#y2x@yd^moB)o z3-HKuvDC!=jBU0%QwK799UX6X`j95;GF2M{S8b?}ln7QuXLzzjxFX%&DFq(Z+e)kW zZW;Q<#|lQ>bM(}ve`O>IMv)>|}A{k-OPku+#;YO7>;uu2@?!nTezjW*6Lp< z$ky^qF@-4;MfRwD-PxnN*_zu^r2iw#^PY!a?l~ew0OwZ?%^da~0?XoJlFYOvaPbk$ zNJEMl4^@GU`LmyIV$hr{4=>+)W1{a861%McG415m+Y^~4g-aWNvZHA%^`6qsi$PyA zG1YJy{DMKs(pW-qdM(t5BY`&J4FfzoKQ_EEWndsSRz&fw=y&@qZ4X;7Q`h-B+0M#b ziI};tN=f?acuCnIRzA_Mi3yBoI`YPLK~mF~j;|fe*` zq(`T!()a>-wr6YVd^pm^W8qBw$+r@nJawVMc^YbyP$*j6saou30TpfK1#LA_G$iIh zvdr{MJ#9Nq_HBSF)3A*h4wTlQFOZ>>F|258B$7}G{24K}sVWcy!=X(UhIJ1(k{14E zIyhLazO`l?`15;1no)}=9P~R!90b;joSi3o=o>yG=9E5kxLTdr*nPg(;q#RwoRv=5 zOZf%bB1`0|;?B|H>S~NF9-6dh02IiTRmy6F38e0Q?x77N_WZ;rFQKuU9fjA$zTD>G 
zsc##&o1&f0OTh!n{HH+10L8^IdIWm(mNP48N?k>Ya2!;VTj=F-(C_t^Tc1}aekRxA zB`U1eCn|sPHOclZsjYul8+hddb|j!~7vurHEJ;cNA43gpPy+J27Aj@pFfb1T08fnR zr{6yuZN6=$HovXpXLoyzoD}?JtZeX9UOm(wd)6O6W~xiE_XH$)y~|#S*5g~8 zRV7Ca!U;rUuuM&@bv$4Fb3CG_Dg0xWeA=zN=&(kmchFEt4kSBIn3@4UDKalh19Zen zk7b!@-t2s$Q)-%;O*jnnwpB4m4K(AZtc@wedVFp+E_QdBOHUKk%r;_a?sF2OCweVT zrG>S7;l$4Q`P&5pR}%q8h@^C1HP6VsPrHm5&-bH0y2Cdrrg|E)ApB8PvBTQ+w#S27 z&c2Ebs~$$|Sx-9+D!|8I{aMPGva6o#a0FZF3)a)&v~(9^eL)eWqOAM?ba7gwf7mkz zo~fsuz;SRzNT=%jD*roRWaIYisOwX|>vk(3Ex80(ZW%)Q2#s2MGw5c4=1e$@RvHHvMu?u#R^ku{-SU3tc|m z8WKuf%aQHwQj98S+OBE3OHcRf-qAw!cf7TLSD!luh|(?Z<;ivuMN?`QM|TbvEP2Bi z{*{wumsztQSVtBfP0KQ%1~Y(byVgUIPC3itKk2;skzzS76H2Wg|0>(`uK$P?QZDHE z9C=RD-#2URvmf2+}fkT4GgV*X|kE;9WO=pSwVWeZQfTl@O;*yXzVZmD4Yk+BWE3X1OyBvh4 zIxkFIimxKmi~D^F4%cfvm1ZV!V`s1YDaoU%=D9toGE4Y9NVFv6GUIyDkx(OE5)Ko}N6Lxw9 z-6C$-U3MUW*=x${V`WRo`3{402hSd9P{F+z4hcO}QNz94TcK)*=I3sbiHDz6r0Q`- z$720ih9fF&H-FnvC6gC0i&GPz@3_ILxA4?LI!S_qyRymC1Q_$`J({i?a*^My{_If? z+Ay>@7=DI$2gz-W<3BTXAUii5tSY8<=-wR^1+4B zLp#CM%?w$w=U0haQ$a!Db;l913gmk~RD9kKWE8t$UZ0sP@7%O`Uc7kI%!Ma3Hv5rZ z;E9dg%ZgToXZs)MXe$NUS|R0qpX&KMAJeIhdrA$b7cl1#0p-Puq6iV0m6UB_9Fr1` zfH|3Q9NLcgE;!P(*YjwD=5t(LN zZg`sN-43*T`-O{V5`L)Qh4nfReSFT!>X?zYLDC!VWGa)rXKeREE7RjP^Kmg+SHR22 zz-o1=>(etE2k+_gU`$wVlJ|Xo(s!78rV_ajm&>|^k_=x;PPyXE@zY7i$qI_nq7S_1 zJYP@H{o-OZqX_e!GdZoT9d~3?o^;Q&s7Om5@^tI3aF6^YBnNAL_Xrp##nct}Ggawr zbFj6jOZ11wabyld4YWCtU7Forxa{eqZ))33F0(i{#@vy*RLtnVuHxTiXvL&>_jKQ# ziVNc^hp2F=o~EBxp4oYd6D$c>}giUxhRWx}Qj=5>JoT`+zbW|W+7EqI|i{?c(W<{K70PM{xPF?3zW ztuQpy=Da#!)>hu6prhglvPG&};GJt3Q6LeTXt7>y^LUuO8G4YR!+0x~YjODz86{X$ zWLKHPwY+N6+Uv&5mYQloP>EJKnayg6-y0q^+%P2k^s^DN8)gPX8#|B(bSaPuO-zp6 z0Cn)5pV#^5cT5&5Ll?ICe+MRR!I$6GXhyphOSo}20n-3jd)8CKBAW*g%UueE7|$n) z<X$otn06w9x1RyMvn3}+w_ zyC0lDJ<7e6QSPqaZ^6SKPYvC>w)a!(t#4_qZG~_o$a~veLTvcP9diaxLwAAdhp&OR zSq7I_GJTT17-zq-e@;m=S(7~5aLuLzA5xH4~e9=!RFp;x-|%}6jxbH&(v-n07Pu!eAxUm ztqSAuf^|W6_rVF0Gp`fF#^!ll+G=hOvNH_ROIpMXA(n`Nof`GF@#pH)AB6CqwlRfA 
zAe_?lD{{WHV(W27FPB8W#4G=C{1hTx_zY)(lqqZ9T#e4Sl>*lgK+a{HkIK3&c$x6N z(!?tJ;fPL8+-@3IY8r(ruOxz0l%9ZX8DirNRr=hUzwLDi>-BuMeyh2rm3ZZOe$ZbE zc}eLi@@fNpy{pX1!|U=pNBSjs7J{vHf)8XLYL>@x9l4bQft+2A_r9d1WnJ=8{Hjfj zs*m=1PxEmIL7Gax*Xe3EFC{%;1kN5D>DQDbs|b|&7#Bzm@Q29x&$K1D_%c)>d599{PPN;QOuIeL z%`~IPHJJxrq9&fmh`iOZVadVL`^oaiS6WE&VYbz~OS@P|8{FE(vU)>>kg)L6~J ze6`iFCxEP-Fnc#`)karWd0~9YV{=PL%f?_9iGETQy6@rmP_{dfGK%~Nk@3*#+tz1{~?YokdhRfcYC zXgB9JWLGxFyCijyk}cp97E=@aK_5qe;w-7O zh2jj9USVt~GtdyMrQPe)$bvTS7&e!L!ST-)J}^ydn7?NDq>tH4d)Grp?iz=}0MC>D zYI3-RZf|6lh=BUyLXuq(J3M1%ZxOj6}<4?De62g{Ab~r!%WrrFyOIPpf@9L4o!NB*F7hsgRKP;n$ak zk+0!~hR+S(iYpPS_0bgOCkOO z`3^vfNwNJ6VB=mp5gq(Q9{|xp<4PXZd_MuOJZvmcS`HouVwTH znoSWi%kJ#7BudM<&CYsR>2t#|_X=JdHO2_|%0)%)Vmkl$Nl-T)YsLl>FQPsYdhes& z<53$6ZLIFq)g2*?M!!)YPJDUio0cyKRCz^N=2kt4R5*Xc5GO0s09_wEY+z~k6=l*G zA%he(f#M{~`!P#YvtM7S66REo-0Tn6L={=kUoHF2T!wZAIcYs%gVNLVZrZ%g-Jb+6 zL7QdiLSDs9E!mY1Z4XEAiT$cF~%yPbh8zRjReRMpxJm#=E)w1NPlv9|}8A5@;VYp>; z#`Z5+uQ0fUaKQ3>P{u%_EL}iXS=FKHdbI|*Wnbp?tvO6RYM4|Dp^%>8$kehwxkt^J z+{NF}SOkQy5OEi>+97DaUWLSpCg$DJ*t){9Fwm?k&hQXH+j7#S>d4SKA^h=ju5iHr zkmdS57$rdtggc4*53^J;`+`@h7!UC!1;)0ur<)f;@}8;Xk*T@BOof6Cp#n8<3jf&P zX71$W3pH2yY#76?*q031@UWE7mS$mVLHMInrsxJQP_|k|Y@XYSAWS~+?-&KbBgZT6 zd+F>8(OVQ?m4)7My9XFZt#t$+N4jZwUDcHsHp~F7sbVB6^K$; z%C&BodV6d&dgm%Wpa88{2!-O-0LHBMWMSv@n7Q;won^LI5@~{Y`7ocaXQd-sV56oL zb-uZ|53z)I7qMmR?E*M37eA*0!Mpe^=5Q^Z?VN_%yYBVi#MpjJR@V^#*f1l)GPT$L z<+K_mJ7^FAV`XKbRcQaI)27kL4o(rx$mYPa|q zunqZiQad}dB3c9!2i2T+z5VxO`)3JIAUrPg<=AW>=PM*8#V?SPG&)>oXtT68SqZN3 zMXmB7>Ilw^>MCiUohFzUs-{50HngWzPu%Jj7B0o@eL^XasEf&WnO?Y5aR}Mj2SbzC z$kC(IXO>4*wKSA8YRk)As*A)BH?5WxS>rUwe}En0M$V-7+Pv>JZ&RHIWx|M|19QZC z`_KWP?7OK}y*;*bjcB(a_}&P$mT#qSpg$);W>Q{Qe8%zE73R+1p=sYG@Y{u=uZOB? 
zUt1l{ej3k5R8|GM1@+{A$HOz#4=GdodC-}@6z=RszUZ=)rq#hOd2Ilo_QcblX=4k* zp5c(dNWE>}Aq>{CSaC{ogEUAL(!E1FaX$OJTIK2H!568=NellXKJ{=X9&k5Ysbhch zio&^$hxCVaj92iP89&rJH0*40aVh3h3bLP5-fpe`7SMTV)=u(rjDFRdS5?79 z4;)J#k`G5!LMSx%f6IB!!~|&1Ssc0J-g2tGBWbT??y6MJ%j|FQN%}7}uwJ~G5emlo(nzqpG1K8F|0E3P?Z(khLfED0pJPWZjO*tDoeFu;c zZEp9uoiKmIbVMm4$WMbh6T1IMq!#s8?JKxK7(t?C>HJDMx>LLYUXN3j!LJ%W-rUj; z6G2QhM->aW@QBmW2-1F?On-VI$|xg^@MEfPHtHM+nD&t>Z%x8#j-Z;pV=tr*D0L6j zlQXmNFtcM_T3nc!Q%veN(n>XZTR71^5pnU-sH_cvB7jj=vZ1`by1afukmKq!A(igR z8GdpQI(3(l*mDOMMW~b+aZyso$lEiBe$vq&I^9~ntIB%bSi0K?ymP8QM`Q~xFQ8_Y zbp)|$>gt#~!rq$}AdP-N*287lr6@)z|0I0%c+MZiEQHe>Ev14%|LYlarNx;V4mZt2PkOTwVIi1Kn!Lf+%6GDremXM}hQb|DIFG## z2SIK0=_zIvrvV}P1*!$|9ZjI`^qdxffjn`YeR1GSO11ho*7`3Xx-EPcAb8CGq-q1)mP26?xm)ANY)Iw`is#uD~t{(weBJF8wv~c3$23@&On2USDSIHn( z+U;2Qn;H$f7gfV|sHCt(6IOg7dx60=q8+Y8C}!86Z84?j-sY^5*7 zMLPPDwBfSmlsM^7PVuh$oyYq5@6de?kD30yQUY(!oynNw3aUsHC7s{wCZS+lCs(b=GQn<|BkPr?ni7x9(LLGec}4 z*9Y!YX$YBPGxER7?;(_oFq5JA*>&GRO+)F2f!AtWYF&*Hwp466=i2vg5p|l?STri< zuYgN+Sn-W#Yj(N=z13I)=8-W&RLOhEE(hwG`q7#v$%%3gL(HvJ z^06eT;BQrqEk2tM$e6vEax`@L4wE#G?QKb)nGHRE-r%Lfn_Hm{@two?;rYm11~qH6 z3Z$2Z(l)&WOov8YI8=Wwt1C^73;sU1jiE_$gBG%KHo<&P3xtHn$L@mkf6)ZK=y+Jz z-F4lfj_E|2*QcfoFrDk4g4ol|^Q?zrvg*m?jiIbBYqy>X!&h3u=```Fm@tDrm(as^ z*T{X5Iu^K^wDykCG3eH%sNduf$_-zFx+O+`MGBCRh#5w zZzhbSlzw5@!2fu4XT~^)N%tMQ;syrWNY)uBNxz#(^b!OSzJJ>sRb-VE63vN)c;ewgp;xdWTZx!;ksF*@xWqDX5?t5wH_n-@`UC4L-+4eIuH&wBy{=%1|{jy!$c{)z!{cp9jlW8HA zW079g7-x4n2pAJ>6+&KW#2&cceTnJ489I0vLyIZE@>3M3Yvv%1;!*{rnVO$AOg4ZM zavqv%8#R-l=<>3|Y%J@fuFjb#_?_&|QFXWxwy;|7&4~A~tItyeaq3jJfFfSoJ=`kA zd~{~z21gm!9cS~3gMje6p~Nj1ut$smBH$V()tO6nU+_OpH_e6l`z8nW9T*@nRC0d8 zgm6_hPI2;`T_wYg(}ds2jP(trX(=u592GFJh0={-gOPdq1V&K4CnbLQ{#}sB-1`g$ z2F9d*a4@I{kVXxOCmtq;)pT~Y-)}e3OJwS{2@2Lpf;3RLfFX+6k__Pnha)13!M#ybK!J7Jd>LdNgV?FANtYIqh&?#e<#60&L{r^k`u& zCgV8iebdndsOTE?2UJsZbai#-DSjUaNS6nL(yK}#lqUa+-GK1EVP>{Ac8>p)iZ+Z zeb48so48Hu{_q2d#FzOdz2EWY_MvUws}@#?)@GE-D#{LzMn!`(IxV~(h@-xNww%NS zV)BfbA07gu6PMJI)`6|=WjchKWD 
zFIu`m!M9PWNqFhPvs(!XgP8Z*oighEI2iwy*v%bTBqFST00@qcNRFILxT@EaN7`~a zR-roz?O8hye`7QPS&tZnugPO#H@8- z@Y)~h=Y?fpp;N*NWt00M+g;?^4GpcWrk-Kpvh5a@TmBZ&O-&`cBW?GIrW~L|zzd&l z`Uqgn02O1H*xFkIOeXlwfSeT7Bv=9<%n}uI=+BV7>jkRnsR>2~2XBkWTh%mh9=b~I z>+W8L9(=HE7mq1jK4yD`+w4`j&gOmdxD4N`BR;)RaiFa;Q%FDv9I0thKCD} zzy6GZ8?s1D^0ZP!JR=W29Kw*G*U{B_+T2!a1(r%n&QV4VcRNkl?|t_`!d_2--M>rI8<5*KgHu?@?t%4ysy=jjg`4?7>4Kcm9D}tY*rS}FaSSE zZHk9?bZXx*!c9b)obsBj z3=T}J=ql+ijR@ZrTmML-cfWsW4d&*-nFN7|hR$5*D;|04>bdHwsrWFrwx{9~P<+Yc zWhHVe4zq!&I(z$v%S~{s_!r1F5I3FigQB2n33zkfw+h0Mw6RZzFO3^17-&yI^O(>$`30e16OW5YyH4!?9cC^t?w8AkFK|ji)wqrhB+!o zN|$t}bf?mtGjuc3Jv5S%BO%=(DKT_+NHZYP4bm~Bfb=`(Ir@K|FE5{l`OVsE@3mLl zaoyJyq+>(V?xYL$D0*w-?Z%j*VCMF#P{S~nH#%Q~OOk1+h?T%nKLJrHOq`N3sh!39 zY#8s&*aX(jF2U`CQ`2Yxt1T>&F){w~vL}jEJb6_gOe0e{L58^%aqd>Bo%w_Uf&+2hq%jpEmSCWq8R0a3P2R#)GV}g-OR{`+ftH9>ETA2)Aq!(D|_D3PI1NC z>h(;#!-9_UwKGF$WHkvC=iEUgkN1cm(1vLLjgREh?~A@?>(B|e@<+8?N1D&}c-_w*SmkC#usVHt!bTEj&dDeg(S)!wrh%%R*t0z9!m?m*B0b$ zuhIx_CeK@)W|SHdBkVZXf_jY2HMDqa2<1=@ZD9ys~~^06K6;Tp$2xO6f` zWjI4yAwzfr3Er*2U7{MYJ!K4x2h8DRLoq&wY&F#Gr1x zl->+47UkuI%X1BIAk(m&azUE`I8he*A}-H)FGgrX#hu1moYDX(VT4s8D}7Y>m-@N= zFWRQz?Z%>w%CwO7i{xyoI>9OMY1^RHM3Md6JIzS;bVj-)Gr#Z>>82#DpJy&TSZyr{ z^V}Qegw!was|FP-{O?)a%yU^G(O&K6(@T-ujJ456yfUa?mej=~^MxjN9xc#{JgeUb z5f^8>rQ_J=QoHV@vmL!ZTA@`L$m+eGkZcG0lrf`ExQt?& z?WCuN*YLuBY?g6krAUit6PAfz4twThGeu7Y-$b5!b%tdRTtPqjxrccjrwr-@v?&ehKPqDU zaRS7$OOCv5LtI*qP{82FgNU_v3T7e12<7@4FH_mgD9Im3r51iXCNz=siU3B=gSV=p zg30Wf4?enEWXLsi|ILDRE>15b2X$bmAl|S&_N{hCV{@&509CA8O4X;H8Uny|6K;Ds#0FD+QdYs!k$Nzm`IL(;{R5C)2nLe<<4tDyeV;zNPSF_GEJ0g|PK!knA+9|by)&xy^y!zgNO z;Y9iY@%E^Hez;cXERA&EZqZIGS^eSc|9+jeIlhAa=Ql|0)6_^PfBrdD`kmq)9Ps66 zA`00Uq0at){?=PWsS*0t|779UQ}BBCT?OLo(S$Z9`uUIzOo*%U4WKSX?2#$q{=Eig zj`2TS18HCm5@{p7GtPlXz1Uaj)fB8?8l0v8pW~j@3t7}N-(?}*6Anfa`{MZNU%SoP zS$zb1qoohA4>Q!RvhiU-OeoDcp%Dd@p|!at=O>!wP0=SBP^m* zt;yOcLtkPdZq2uY9k71c%q5TZMqGgRDU;KgUW-MFrwlYr=Q_4--V-$<^lZyxX4oM%&#SET=-( zA^GbIS4630k3SPR@k{x9`N= 
zsBgSKUc_RGq(f}K6;KYJy0%1|*3K4T9$rIZt)E(;#|lFc`0G=V|I>c4_f=2S+xSc|VUUa8@j$derNYs`SiJ_0ufG{Yca`SL+ zOk8E#+oNsowi#8{7r&>K?J#_29Ei;itn^RO%cA0TWq#dtCX44 z3(GVuw$@GZL>0sO4gll(xqmLO^_4$8l(ulmEptINR$pmlD=cv+2$j!wRAR?4fb9WI$bfYpUUX8G_1osV)ee8sv zmy5fdQ;*LaBwdvV9a(J_m>Q1$AVI~u;L9#SWyUf>c=5(1SakYtT-*b>}_D)Sr#1T_9P3V{bo%k?wMJMYVt{J*b6BRSvgdU@q$wNTI=hYl53js%*nBmqsQ(LGurQS zxu|-mnz}k}U+SQI1gCytsGB1R8fuNs=04jp{*HX<>{vtuFVCNDiM#YojOmgXe;B-X z{n}8;TDl)UG~)SL0oB|VTG{$@1#CPFI*?oYJ`3tkJCsxQhpDf1B&EU)K)_?1^X+ z_Q0)NM6=}DMwU%Vc`xwB#|ENZZL5QD+NMoYvOL7v~#DklS^USg&moorG8jQ=6KCBXp z`zRTBL(}uy`lLf&2j|q<=V_xr9pPlG1ws+}kEN|b_3f=z{i|c`lY$yRgJ~^o*9I?Y z%1`)h7md}Ub`|X4(Swfknwv0Lw&?mcBI}zEYd+8=xXzQy?jyXjr{3HmL>ywEYjSYW z%3QQW@e!^n?d+a=FfG`{8Kq;dT?GgdbvUZ5?>sOIoG;0HdlDCx6|?5tg~UsoCXs>>i*du!(6H z%q3+ZdfO049ZB6zs{Ht~jx$?#g(De{JcbEtiaOL~#rwnoL7qCxDS1~`D+C~71ptxJ zUQy)z-5(>RpPNPF2#&oCFj3Wy8XiN)S#+S4de&Jevk%n`d9ZWkMkC=Q!b*lgC++=bw}Bce2~bc zi-TL+PDo8cZ##Le^Lziz^0tn0HhYD5+}+5t{nPvdEmBLhtdrF7n|N4-d4P(dik_o5cLQ-EVZ!ccn!R9PJ!<6wq_NTypkrq#CZ zGm{!ik`iH|)S=V^te-y026rFz{y3dxPhhYH@Zw1U0l_WGT1I6dBz-bPNwocoAXPz1 zGNy@*It5HMXM%4|WQm?}E9X$B}Mz?m#0c1wtg8=^XOzgJEc*3wd9 zm|5KWiRX(k?D8UhCa!Xo2P;tluPIlwaB85ctNCXFVeG55Q4+H7nupAZ%~~c)9;`kW zYo9e?{~u=eo8?UvF`(jZsbqpJEs|e>e>SN0*RN26vV{Yf<#v}0MxW=JXVqyISnFK_Ajk_^%n#(xyKxD!UY~Gjs*rQzDBbaDN?HN3JRear_<3)U7 zQE~%zrP0$URp<3;ItLJ#5Po)IYHVn@l*f_0)RMu-F0`?5$Xz@j%tERuWYeneqER}I*$Iy=T-#A^f z>q|wxwOpXCKBaPz-(bmiPY81;DnmILndu7L?|Qj3rEKK3HsR9d121K!p%I)aH_IT> z%r9pRKJvRx46t5U%$w*q(Gkiu>tQ?>%^r96mC;OesJ;N!C#Zw^Qwv3&NwRD9V)e!e z#`oxQ_;~u8U;z@~mC`6UF`Hv?Hjb#Qi63~Tjh0G)ye6?C%q0OX51e0YsI5>SK&#j< z{_cKoSuM`?#~BZDF6j7&OD>YvGry*@t${mU#ozVN1mpp)ndB5{tvg9v>foV4ZM3kD zzrngNR@RdG1#$yfb(U7v*0lZ_=`fk%Gb|hRr@V$g_B6nlX+dp|Qq(H@+kPDgBBGLv z0|*A<@cnT4{yl=f%z80XqQezYu6t)QcvV&#!o`!e|0{32J(hb@hdwsm+vq5=WvVdX zn}#_@B>hdDcqD3+M%RRNDgEzC|F@`D)^`kk=jQEIGhXE^B)kK&`P{pgGCT}I*FNo% z?e4E=&hE&(w1(+5fzde0AVxhND8-@KpVi*o)O(CHKTa`YpbPFS5QpxYu5<{J>C@wh 
zt1;;Z=PKguK_Q>z%iZx{vVn0A8z{grcRJpsG)d;R0DkCfG2D+#C3lh=|%JSGXx+RbcEIsXzI;2ND#6Lv=N| zi;N=SV{;I8wuG@byJcCmC(E`g`RJv7qbbg;1EIitM^D$`B4+EQ%wD$gxsxkgV2$;g zfh^v*r8}y%Ksu5kYsMuLhQ>NGM=W%rq|eDQdrw#DU2dzY`x)cW8MU-=2d&Ffq7wYl zz+~=}ZCAe)hn^MZZm}4w17w1 z5-)J+75yy4N)iAm3{A?^d|mY)3U+&*E_zB3Av>Ri1zBXD$U3;Jpe5RXtWJu2Jac28 zu|-6oQz}AlLC7-VWs}~N#{e!OT81iNBZ()~T&$~`+~wEFmaO!2H3AE~`^mby*HO2! zz0I)9brdJ8{RF9ZqMtI*0zdAts45pXBo#9?xQ_%dl-k-FPY}TR-M!K1XthO%^>2@^ z{Aq`07gc#bCu*mqrmEOo#1qOO_;>!eR7jo3Ja4+k+utD&pQ$41cEvsqs~^8ia?=kx z$m^JcWL?`r#M$oa`k1VCBZ$-tn5GEE^%nd`sS+uyB$R|4nc`_h!BmDAM=^^piVLk> z>Jbvb#m$AZ1WC*knTGRvhN;Vy;+94%1GAoJp`a){DSB3r57vu0x3Njp&}jJ{cwGY)=1GZkOda;l}T`IeeMnI0j-;T8M15_@rI z&xYX!mMh-X6%ZJ6&)|*)Y=aE7xj30#D_1%=P)jw1IP>s`N_N!)fJXP8Ub`<%jpLjh zXqD{j%EmqUxL2qn!n&lWm8?>AEQVz?-#xZm?f79wxLe5{XPEDWNAdW#ogwl^<)cy$ zbcwlZ2R*f+p}&u;)b*?L5RQnbb=%Wr9cL4YL_xi01x?%hB1ne*bsWK zrwF-^5O)}klcR0(_es_lCsdiB)?kL)7xI5?d=sm<0VKhs)K=p%+uD4yjX0?ucsI}i zxlr)-HiEG@a>dFg~4yrP6~AjhA26eXHg1To?;oExsm%xP-CF$`ZR?Qy@UYxgqMlt*u4tE$KQwoIy|zb2pXAop z?0&noKRZyAe2HDP{2l9~0s(G@T8U)y2fEL@l3w{fu8#1L>^qqnL@wdABULjoa?kI@ zj>m-F-kpQ<+)lqzaA&NUfu>O!{6d1{b;ng6m9n}P^`E_|eTx#g+jd34K4Z9rUD4CY zl08dkd_Tqk+k7Q4hg4fss6qbeb@(uxrau%X`bCktB<4JqK2Nrll@)bm&lpIFH(KSK zXn>dzHi!!x5&^?4(Zm%*by}}Hb!noTvcSos{9DY!)L@$NQIL4=2 zP^0d&+_4~N=JIaOvy$x{v%e7cLtNLxQ5*4i3JONK666J*bYdNqX~$)OASX`9P3P+T z5*uBTN!uEgda)Y+3?a%ndb^WsN(TpVZ(3$WS^aM#KQMdz{qF&|3n-lxLRgNWM(E>Y zewKLX)dfEO6^0M3`=mT|VO}=$!MW~QGgO=dp~CaHIhgx6+z-AaEP69TD3#|e;=k>5 zB0uL;#808!>Se<4XPACp^)_$2?=aQ#Kf$$Q&{tl0bv>qOm3)P}aWzia>u)p*xdg+B z!^{B)J7?ml>x40J|LJk!sgYE5-5;wz9UJ`%vr@1m+vp|{s`EGe!x(#v6SCKIkPq|W7z3}!U#T7uqOm#bf=mUh zO476SE(iUXUPuVEeaWGtCy?cMgUU6&tE;2hC!;g`Lhk90g<--HXAsoCX)*VF3GAh! zQ-58jo{eHK?WI4Bx6*@wBn=%XIxWl0mw#N}r%y+V8%%!?Cd(o?hR*epPv1*{Y!m zFZ}qf2P`EUc!+m(dOiMqflep52s)fXN&E^TKdK##V>zY<3p*X(&v^B8q_cT@44rp! 
zldKP)Fnj3?rLa^CY5p(yg>=fAnB#@qNfeD^Ee1$?Vyeo$@LB0;H;Vd!@N#m~Ff4Dr zOAIf0s&RZEPv7PZW@%iC>Bmn#C4aYv@!vgq_jfG+Q%&&+9hfj89j0iE2=&FO!2CVS zBQVYeDK{ZuT%^1zC_aUM=Ei!_DKpyas1mAd{70XbtTRMT1~-mQ@%D9}Ra}b}y&`>D zYWdLlk40bFWQkT@-aoB^c8|BQgR;jvifzO4lC-3L3;v+DubOsxOx{a`egAVONb5e{ z!)d`D)9bXseA6!5jAL1kmUOK#TU*$U$)ESopAi^ zO841|?mwTAOwe$U{+`6%3`gieHE(!ZfuQhaN`WGrRbTswUM?z}zYU4MSJLY(^TofF z$xjeZ=Ge^Q*5-OgZyVUjbgUvF!<@{_AcNXNQl~^sG=z=(r_p-YIWrO?$<4!~26Y#g z;Nb3=LT!>tasJsi8KL(2RtTExe2&c!BaU0ST|jJ?5YFkHm`T$hh&@Lkd~UXQhnP>b zFCml~|7G zK-@ND{QTdy$S{N=?>@7X!r>uB(9gBcE_JoHs%<@AjQMlQA_eXK)WmQx@l2UG%tn{D z>fM;J5ldIQTdquLJ%E^YBL{iDyW&t_HhyL0H@eMICAT0#BGb;jF zRm#lK8+!k^2urH8+{5@AI$7_3>uQcxYiW>5t3JlLycbAfVzLt5pD?^NVC61QOFjfU zyQ_7pZO3LR&f?RRLRCz0?`xmEb^-Z%dRWros?%$ghMgujk$eehf2%fbz~0ziuL2@{ zX7=~}afYZMMRS|P4a^D%Ak(IqNwE|sDI~-z(L~Lrrg9Hh_r?Q6;Pk$EI70nMNI^)7 zGVcJhSah|uWebk&p*Simj4w26YdiG~HB;c1P7VWBsj0cwvww#LiBvSf3^(Lx*4WyL z&N`ni^s{*y!WFHdRYdTgr61d{@z-Dcv-^fah&TOhh*n$uZu9%U_tpI>Zs#8&L)VjW zU7VtTDq>5A*8?LvjIyi!0wiM5YMw#ID?fb#Ey3RV7>Fgd<#UOhf$@x>-thksw<6d>jM)eo+Jxc=OG5OLJjJ>3!C zI0FA-udH0$8^31Sl?>_sOwYSAH0Do_lm9Kso_k=KYgAd8)5C){7(r$5T~H{RJ}TX+ z7+|hm@PaP*@04ukYJ6r!Bl6m=Tp6OBk)l>&U|$AO#8WsTzY}f2F)`9FN^%=;yVu`c zGz2L^1~yGn50^_5Ni2JidlgV+SfUv5q_ug&ZKVwV@683*KWKNKT(=sb?5YBRNeS7~ zKFCre1xdwklG>)C9uF!2gW^HKj)|BOo)Kst$brcxG)>Ha%^x|Ku3PX3q9Lt1-*`ZwT$)QL4KE4d2 z8+@rU4vwaJin$}|i>nkRRrhN6y=hDsXBbR;;`=YgDJXUBAH?V_^}O~y5>KoIEPA4>GO6tW3AnC zz#NoM$xA)@=Z1B}EjUB1M`DW~RC2tj9=R(a>Y5QHchlf1;eQ{>v8I|p?G7>X6a^^_PWn4$4Tbg3z;Fb zqFCM0XLPKd?lA5FCt#1EvU#-X0Dqr1V}rkr(B}B?1%cPnL@J@XtCK7m=X;pxh<^Ze zNR2Y^h^hvhLl%9g?ww;9{ay)I3dM3ze-)Cd63*4TWyr%N0|W_!FB_M7xs_~h6HT(R z%kzhY0t0j}OS*C_9ni<)T7UlgqSuozn30I2Q2Rau7$gPP4&5{rmFx}pa^|Du;wfg2 zZ|DUv*%gDc$a9y9%F4`Z0U{mkHRcB*DLLvBc5Lf>FpYPQnZ^_-o4H0^b?FLtQj~rZ z5pbORwxSIX_~GNmnLRdTzV)P2w`v`=WK&&@grw+b(MW=f?yR6>t0z;k+KEp^HSi(` z2%frsxD=fP*;&J~^+W_k4+ou%-P;Jpg)I@#>o5E1Yb$#`t|62(GK~|PI>`Wor8+cD zq4FKdxTT^(zBd`(!*<8l26tldvldYVUpQ4^*L@_e0~c=KfcsfleZ~-sM6^`e>sa;s 
zjoFWu;ny!LtpYPw#pi}q&Ja3k2soHI_IcxQ($)?L96P(|Ixs)F>c$+}JU_W;Vw$aV zm_|+W**2r=_UFOKZLxnic~ce!lYv1wTh-;OE6kV=eQ_vR3$v&JSAW7wk`M?R$keS%;oJopc$JBkXuS?bNSFT7DR4^UiplN4HgOLq>!OJ{n}X>tV+4SC=ubvKG>t6 ze1L6dnu6&}a3Hik;!dG8`9s3`5>xAC^aV@~%5OMO>!;_UVQ=ow4tz*<%hFP_J<3{K z5$@_I(GC`4$;Uxu1ieXXM*RMbt`~vOziaeW?F@iT#zS%PCRSjDOW-Lv%!`W)iB@0n6cxAwVJU)~`~sXhCty*UWVMIKXcAY#%-K|3KDB$f4EKwq-xscz zF|Qz#^L{@A8UyX%`%m-15b={t^{Y~8yWn`+&VaIl=Da*-hx^3xra3$`r#0)t@L+6* zZ`}&KWBtiMRYBFpChW%Qg0%*lqrj(KQ$zE($}Vu_T^q65;Vti1c%3=$lOQlf5SSV> z5r#mCo`Qw{$$2&qc~M+;K2UaElYVp7a;gjfkNGHj$XWsy5NOkH%76=Lq1_uqC?W9f zh7*3e`g^s%?j%e6z1U4t0$ThJk0~MFv47kspKipC!H%j6loRv!`RQufK>t|rfLyfb zGx@pW`{P?%y-!crtGb|(ethg>E==BJwakg1Wxx5j)NB`jo{uQ;jpVw3AWnL7 z;7znA(|^YnY0Wq8mK8s`gqY|hA{{%*HYuaEC!(`ed@xe?1{%D)%nOC(uC{O1^P$t@ zFk&+?QK=!Iw9s#filDgK3m^X#N7yCdHq(K9-zXb+L$+*4UHs8b^kR3MAWy#JdBbrI z!)txRr@=T$VkwzK7I*K!7V&;7JRQx@uB&Mc5}H^XV3L_hrFn+9bU|I+#kWIOr}bAy z1}vYu6UgT_OkrM>=a^W%mPw)V$Xv7I6|eSvd3?bn&Ye^LIon=7Vh*(H z6PtZ;PvTF0^`U@U>Kmk(>H?XDgjeIO*Sl>3Js)LKu&p^StVvJjWS|o9#60(xm9=go+vMTIgVH! zQTt7w{yPz!)Bm-+D6_Mv(z4W;5)|^&a1OLu-ahXu2&lRSC|kN#YXE_&b5K!Dwny1M z%fwm7aC%%Nt|HV;Z&sh}0B7dU#3Q4eW22ddsFEIZkoD!($(pt==mSm8=1@ag(Hd9} z)y6aPB!Q_pL;B>ZK%FX+$s0i`!iCe!s zT_R%+UB7xB{@h8fDeiJybtiqy&PE*;H&mOV|Eb}9TIP69zabStM2>)`rfM>RJs3F= z<5Qm+ERK2k_^GSFrRE~5-U7?1qs`hwCpg;h>3DT4d#Mv>eOL`&qluVpFRNe&aB)*n zre`nRe5cpuVGMEh_ivoD$Q88kYGmq3n>Vm}a+0h*_r6Zuo9ul_Vbcc9_ym;`+h~^? 
z4wetx_33=z+d>`B!(>`YY+&(B>;&dLz`G-IcdlOgq)3)`bY_sXdx2=P6|~|;eH@sw z*hgkJhCum5y(017bVj&0UVJy?Ut8QwBX>oIuL_HLj#iqTR$JD1{_8|T5FXOu==QWahOHa|;aQje?iNGb(x8_$7N3O^27Ihzt{qwx|d z>lxCF4z}kw&n#0?SU|T9rz!r=!WA%l(@ipnH>-*QBrI78!Z2Ssv}|u~bXu99%gRk| zsIE=e?_|S)74kRMS?kk;=hcF23}};gq<$M!mK?_H*V{Mn&(07;pfFk^Sp3?SG7s65 z#5v|1Kt3QlD+ed@i7e)El5qq*S#58SQ`&rM)j+HHG6ku6@sF3wybGF29zKfT_dc%G z)X6{OtgR5mS837a<|_;mD$ zXu`O7rsNm+oy$tuv9=D2J=WCF6cIt5jT#vPKb8B<<;U~B)S440?q-o}&n*JSt`WR; zXl$~UmJ|0YMx9Zcg+?KKV7twB)J;X)Dr(7YQvmvDiP5t8EN?&7Q4z)gO&OA<*BCT&c~ho zM6BfPGr;+1PUnJ4sK$u!wVya5ca1vG^fG)-AYarHkLI&-{J3YM_sT)Xm&vf9+Bzc9 z+wCwPA2_1orzeEGN?kA}#>T>qModh+jq=lpNWsyCQNng?_#!0#^V$gpClpb3#{qTD zv9onyQ9AiOeK627oX)d*gR5S2B%Yp~?UCQvn$NuD+O|tX*K`|!*;g&$vGq&wYDUhv z)RBXYJxkx{0~b%Mu^r>>HtkxjM={ja+~+-Sj@a7dfh$W#0luK$%TY*(v!_=x;62Fq z_@F~#0J;*~VpCpT$@~h&E=P$Oi<>TCH_^0t(R%&^an2UaTxLEsv=EAX{}2-(7TVEdqx!R!$Dgy;cbk@ikEieexm43g%ox1JbM< z@hvt8uH1bj(?5oe{a7=EF@*0C0T?|oVLst0QGp(n;Mf_Yz~7Vtu;a$!pZD#4bekK= z4RsVT2vARgjCJ4W$3$&j3|w}22cye?+VmWXTP@aMStNs!Md|dz1~;s;ol5UwKm~SD zQ1k8kmiyXo0?QEc%U}P;<*xclf!G~pKZ?_>g}#N*{V+1T8Q)JFEhocEeqA;);l=0wdnU&4LHdKd zYSuqZ6dp6g>WvHO5l|$d*Xq$@V@!#qA5oSe7unD?(ZJb5oz2puFPSAvui2C#?khNv zk{;k-gn4gM!67;~o1jpH*08%y@U^HCosRuQDub;=?Y)`1)|5wyjv7pTdaiwT(qbmd z0#X#??GCczh*zQ#19)dn)qi|NNgM%ZBuM|k%Ve-7zLdOq;lQZXTA3P#ZgOY?M{prF zaaO*ma&`~|!ktD$_t1jgM!;&NxvW}O7XhZYtZ9Zu6lqxtY9sGElGUv5+D~#%UT0=g zWu#e&+6|FjdyB96zyXTPzpuLxNgKZXrLsCbDmTA5a8;3%nm+5?;_Bc88cPo9NE0YCM=eUF{aPTpur# z<(Svc%q0;KkB6ors(-MRnRHA|ZjTo#5vUKDd6wl=1>4ZP#?}!8itg#Q>8bmOC8^zM z&TqGkAJ#4^h6qot_MF zmmL@-f))sNbUGXRm#$L|(5i>JCeEGPjv5(}WEJfL7Hs4-u@%=kuhWx&hnhs6`5RyK z0mQ9K`dmoK8#mBT!o2f~%FcU-8`q4x{2QSRpA#ue3=yc0!6e87G;!W%*>%Xr>80|Q zknV}qz=B$48y?CSri0N>>U^`S-dx>6eG&dYB%Y@aFE8jbk=fzAYkDd7%RPe`GUSe8 z-m;*NSnKRiXDu1{uD%^mF@9}VdDLJM<>nFN zFWDdP^s*dJ8!hTw@y{=9JZ~~xkqNPsiSIBJ2T{qn%*6~wy{$NlOb84_nC@`nkOstFx>S^9ekwY5kz}=d{ar*@m!i0e4m%@ z%quPS{fluPw;aKypt@J*jE5F*%bOK;ano{DJmI{S`W}LVuTJ)lA(iD3K-=HmkzsJ} 
zQX6i)ltIU4Uq;(B4al)|zHgeS`W2%}EA>6L@@@!Zy_k_v&W2~3jYFf|THP+f#SMSa z-FC5SXj$zGvt zhQ8DF9r3ugc;$ri<=A*wsd%-w7ybge)69K&&T8kGqVW_217qz-!kh^%JnK|P zR*p@&5njW!uz>F<8F1KZ#dEWfoLMk+fx=rSmUF>muy|0$ocwsDOT}~e8!b-Rzx)nl z;wfOs=((uU=%p) zv|O*!x(6610A=#-NKRCct}JhmTSol@QsDteh2%fgQ|>KE;Iv zcK+bhF`jnWiX+;EpY=(PeL>6EF>OHd;rJ7R6DEDVa&{p0b`cC$eV#Z~he%|y`l0-= zY{cSY_5r}Aw<`K zCIm-uv6fO;E6p>K06a|hX&nTXea0O@W#PWdju4Cj5ct^G3E=7HbEH<^)md5JAd2du z=gLiv_qznS5~e+(w5pUHODhSk=9L>g?9kgdhnK9?J@^G>bmpF1nwZ+#j}x}%)c68V zdKVBh0YB;sd|9lY6?}9nv1C6QWC*;S4J-Tk9bt0SiC*W|gwDZz+wQgutFn1KwCAFw zchzdC7H5)^0jY$X990q|r1I|SC4})x`#1WJYb(v-Vmtx2fIEBm8|U2PBU18*11oh$ z)qZS_oGLcX7PkpUo>+nw=MU$vepVSlYs=wWb*N#|h^Bqla&F{W*Q7K*%SwFBYHV?hz8HtTW2H9r@AJDvEEg z$S3?RZX)4dhapIJ1kjX|%dHf$b_hH7 zLB#heuO*C3mbw#uC8Yzx{4vyPSf|!ZT0FJ~91Fij)|7@t>H^ygU$?$KEZ!l)96$5K zR+K_;#0ACI4R_&4H%=NyE1BqX8$GnDEIpV>Dkq%&~8Tzpc#z`!je zV_EsI2@sWV$_yjSw5okLDvcA@;KCX5FDRGhT^Q7LTHT|l{`elE!rNYZ;9AugOJk9Qf3bh?Gd?z6dl21tQh!_{u`kZ zNIQXkdJKE6t)s(Nb~4iRZ)LSB>8;R8rTDEc!}~I{Q7jF|{%DG}i-@I&of%b_u0o^q zIXlC_(>Ip;amtIcaT;LUxzt1ybDX_ENH?HwEIVuj+NKJ)v;DxkL!KOwv`k6PeFZx@ zm~JoqQdvt+ps$Z`E+7oPLwfRUpV)}`35(P=qHM;5FDSKq?aij^MeWd;YK7TP@H`FF zuiNQx9 zC_nc>=#52cmBb5^uzzJ{jb_q%j<0P<@wTG5h8?gsaUMozpS#>lX1pd-3(8)N#(D}o zdbkGD&n0t4nkJ?vZ&RQ%d!LZmI4cMY7nd+AT@(stkjD6*W#7mhJu;BhbivihMDJFQ zKcU6=ycV~m*P@l3 zG)nJ-tA8DX-rUL$Z)b}|1p-0VtT>N)2(6?*7mjLCjGlPWv_*r!95S<|AeE}v>+ETI zYfPNh=6VsHCIyx-)RRgIrCd@9?iJ{8%iT zM@+tmW)YQpTfLp?vyK};pnrgw)F?VSbHdRkLj0JN7u40Pm^IXWBo@yCFZN?5*y#&wqDlJ|U!Wb?92Ep@7sd@;+wKmfAO%2Vh_H2T(B zG$?401c_vEq2S^J1;&t_?BaTR8M7BtafGjk?~Y~kZ%W4M{A{6)iET;8h9RikO#4@(9Du~)2F5ZMla9&A=>H@8D9jigJ*k50km^`!-G`3h{aN; zyrHh;9pk~WZ{*HJes?R%!2)0X9ba+zMoeN-}Gi{#Q` zxb3-Y+XrRTK`Oq+fOxR3JqBm)son`KEZH_i3Cc0jicWsaT(iM2^+Ijv;T`5<(s|{? 
zj*k*kJN0p=;v9J-s2gKS*?trJX~B040>*0O zUr!M8?Ml&Uy0#7^^6^(KX@a6rvrxL3dr2= z9F@-F>{YarAzUFt393c%J3DQLnXxXFO3Jm1A`#@22X=!bFpjEFI$Vb)JHuevNSXZ2 z3uV}4Lxc;9wvy=U5M!9?M)uHNNB~ZC+y_zw-MCVc2B@Jdbc-I1Hfv<-GczlMwLDb5 z6~*1|L(o}4ddt-Fm26e4p2Z9e8|9LolKg(2?=x~F6xpVz znS3LUc;UF2)QT}G$d6z*ohXjghgjjjgYp z*|RCrb6$!K+Gk=jgszfiitx^}p?dQEv-oG}X$G^ta``d@sR0k7h>(#LL``qsVPDin zUnLrtXDL`qbb_&^x-mY)EUhd||Ngj}+a`#J9l2$Q3BRo0nRg`_#NU@;kCMgsf-;AX zDQl6CC$FvqGcHvvAno` z^-l%8pn#swVkQ}i$YOr!bWFTrHD{y}%SSQEn(F)7;?;~Dvz4u1e3&~>WGt(U0 z)?C}!&+X8L(yjuA*Xk?e*)JmK*-0WWN#qiSi=5% zpsh>;?j7-2d=2exp4Cky&`GBz>kB*vnd&89gG>^n%{pZOP2_G`Jbv)^#j9AU$hF;o z&EEaC0{kyFwAs_r$6~sq>GDH|ZZx4feM_}v&rs3cqiO_c3gM|2^O6)4Xysihjr$)i ze_r0KwhT=bY)V%msY;za^lB!JVMAvTYrHD~1dip3OHk|memR9vW7Cd)L;4)`ZL%iN zPlO+?`Hah|w#=T8OBU!LAQ~rZMjmY5-i&L4;e6fD;4Uy7A!T?=^K9SVzU-`dPHJg1 z)BIySDM|XP1ta>DJp@mo_;(8RA{{A1!y2J@!fx{+R|ab%&VnMRh(1Qhu!<*G6y00x zO|{8$ea$%muaJKvkzP^?9~cyT?zLn8o)N0n2D56YaAcH@XP)+ywP1}yL0^8w^5_! zbx|pB4F_JlkK`PRlrWIxaQ(!-*p|@9--ByK<`R{w;1?k9B&st9AaFgL-4-R|)^%;K z(GOyrF1Mu}6K2`SePWvAOVRx6p? 
z(5sx)F8OUfbY-mP^_Vup3ekC|n4p6EjxQ7;sYdvbZh$37&3oz-UrFq-aWd4ucMI_c}7 zub#-?sKd}{$|wsO4WUt>*JgC@ufT?ojF-KxCAV$ao=eGXagCO)deDC`L%Fht$Ew0O za!3wrFxB3$@0*bBIoUU&_WqO;B8;);aII120nRiQ-|CL%I-sFI^JUkgKX}TJ!bm;* zGl+Xw`O21txKb2!cB7z-H^ZGyZ7^T2`^i|2o8arxxp#C(`4QNa*M>8spUMHQ(mF9g zsI*A>YVjTxg~m8#oI>L12G$+n+o^r+E@fS#P7SM+*^qB|cORF0g*3rr3N;i-IfYoL zG(mPbObpSQ17M>wI05SYPdl6Uyvqhuqt7h`~P_Q z%BZ%&ZfUeYaSFxV-QAr6#oevAyKAuGUfhbiOK@#)_dxOD?)K&0?|tu|tYodMBo8dv3(P5j-irp8fS?zqo$)yE`+nh$7IntMLF)QzcI;O4tG z#>~mkw;rpb-5<=Al}>l?p>V$=TW*3=>)hm!CJ$~rXOG=h%&&%^ac5hr2TZp_KxbO2 z^_QP#saPp-XDo$tK}X_FoG|tS|9=atgvle|L5pE~)B|@1W=xFIMY|tN0_=eDr)`Lc z`d2}wF-S2({-A6|@8n)KRXvue?SOi*nWgYY;1wlcraMHWa{Q)O~Vs3`%7cd?Fc z1JiX1IHjB1S=tUS080zrk-os>)rlGqz8HP=Z%hV5jQEL(eB#p+JBOZUXC_Q#I9oeC zJJ1eAO#R-fNfG$DOs5 z(eK-DC}GwZOq% zY51DCl;oV~Om7`54Xl4Wj&J3Wo?h76+;$QXGouEaF{E73==~ZEUAQ~>LR;E%b&mxSJ>Z=HYT-fbEq-@#%=olSMTX42?E}6 zGp{gl4X$z?8NeXm(%Hq~4dY^2tOFH}zZoa4BDFo&Qx${i8;swP4+$Ub*b#&lNxszc zhZX>J#`-9XQa^Dc{A}L&f~>pvr)CqHFHeaV&VSXwQMeiYv9bw@e2K{wX#F(aW>kCg z#Fe6|%*~WbnkoWdUyG)GpEmANzc-ab3a~8SvTHIV%|(oCV|?~W4-;>1Z21yDbZlow z0P`;84eTb@(caZxLu9UX66;AE8o!nQa|0V7-s|OW`jo%(}wNlg)UQ%av>ujqLbIgrcd41dC@Hi(#?Y$s6N?w+)mhvbp0VV^-SVZVw8Q|_JsJj`3fjtQ*l3{J9*;rg z(^1k<*jvM&2ihdag;{(?&%o}Wt?pb#ozKj)?m#1tOu1Tke4QIVRLc8zVvu8e>R?xS z=R z^@;xiPo4=1H5($3uxP=1*&{DG9H!k3UZ0S|QpL&JE$EvEug|*wz+r|eM~Gwo>>nMd zK>qvl7Y}lnY6r3+X(+#CqES=TW)+z)YsADp9zy`l&C+DBdZCr)|0iVU4%`NP0UEoi?C1VU6N51$O&zK7aOXl>qDfa%^2A;3z z!6RGY*s#N6!{t*%61b<=xX7c5{=~aCx3qUWP%q%tCq@< zFdxkndJvQbhCgVzpgyJ%mN2i!mMm7nOTSh7W%eexR*WyRkVSA|qOGr&OftztCtz%+ zX(glVC9S4JkgvbPA6U?hv+h9r{SL9PA{<9^bv+Na(4=pa+&#!vYif_LDvB?D*9S3V z{m6OEK6NI{;cb+kFF3V4gkZ}=R-cAaILA;0IhphtU+d9xXhBS(f{L-LPtt-E#)V(2AE;zqFd{m$nm%zrGRac@Gq!R+2Y0{`hqVAKdW3*# za^Y)=SUh?5=UQgv(8Xh6*@FxI#6K@B?sDvaXC?nf&3G6;nm(XQ{;M{x$ouKi^yUX& zlKir704jH2!Mr13nM$-`gzAConoC850TAD^>8F-zL8NbAV>R0tAApUedK|n60R?2a zAdaFMx4!M759GT&ohmv##3~`2!c_7J;)~;3KErv1s*`Pps4BL&w2(7Z`%HAjJF8Nm 
z7Kxi$oFBG6+_c{yHEqvY)>P>ydCA$%=jK6`j^e9km^GI~-yjumS_eEHh8IqEDhq2PSYwgIJEmZq^ zU37>nKw@lUck%_t(GzzLJz>OYyq8%8+>deet;h8?qZeS?7M`_}X{8>hTV>;o&IGLr zRTnVVgMyF296D)6&hX8$=>_-n9I;v6t`qDQ(#7yObl#B>2l=1I>7Nt#xj{Y2Ot$~O zvU`f@q=Hc|+DZzyOPb1{VV6l_V->%{#!~`LpEJw%SfPJs*2OY>7W?BI&*f&Q2_0I4 zDjw^?kqbKf;T$aMPK5{4ShVoeh3e75SY_8Wg|4XCQrEI-wmq1nLu0kzrCIcgXG}-; zZrKryWAtnYOAzE8(6=?ehK1%}8i@+^TVBmgvU_^}cdCZ`K%eTj34|<%3AR@f^zdI* z+tDI2TpW2yjH%v4o_U970?Jz|I zsZBELg|Sp`r`-wY${`WHrpM=`w@M0DpL}h*@@7}VS4(g#8i>psDG;OO50-dc)oQ!h zS1_v2?Ct;T{Z4wKhI(MZ8etn{N92o*9n*m;tif1)vUd@^WN+b3IjVu9$X_VIJ*}32nGq!ot<=g4y7TbZ!ZD#h$?M58YoXk0KSScUJ4oa? z7YJXy#NSFgWxUjDB+x)!{C_*7rUOG>@Zo)E)Kog!FLNF06w}i+HO!8VzWzoDk9Uqg7KB7RjaEiZJrKTAp0FPXL>`Ql=bm?z9XZA9t@L2BhH zQQD6@$rkQ++-V}xV@E6BTgdFYycHyMkRo%gS6&pm7puaXB2E6s=dcqZiNIc`@+!mh zG{0R=DNXcV)ycTsj<}lK{h89~ec$~Jq_*+a#s>UvRE(%zBdrwdX^GqCXineblBdc$ zh|v3wcEb)oy;x|BdhS)Y@sV+`U-CHG~@8Q6oo6|&QW!riirxlvY#pTJDu#r$T8hj~-bm+$S zxpURkmD4?k*NUNc6;yW*tvCwA68V;RWzr{1MTc01oY{Pw{vwu znljmART+|U3=GVmP#!#6TifVXbFx6X%<0Qh`JQX1scIT&{yFS`4h;qfj%Chqwydw? 
z(Xnf?7Qlw@*Ma}F%fmBxb3#~T-NCj%invtC|7~)=TV$H?PzhP6?CA+XQPn8zrZGo- z!N7<{s-{*PicPXo{um}t!78F!OLaX8yQHdn-@=La)8*!#q6lr_HB;EY(x{HfP5x51sfe~7;K-ySnW&Y1XRZ(U>>BZKktLlDzvK{Jp)PSWtqN7po# zz+VaR#>ucNQTsX=I@`KKm|a+e8$lZL`n4wMYAWKnwl$*n`P~iz7<{D9?d&`R?W?l4 zr$%H{avks82LEk9i*?{OUC7`Ig4cy-7HJqciu<k|>L4Nkv=X+dYh!m+A`&&l9GtX6JGG~T9sfdmqjSsgP%1Is%=`UP#9H6!_1fbA$KC@@ z)cFh50AIEuOLe4R7NKA^0o$I8B`##fATZHXXYYZgRQcBZzP083PXG1P77+^L)(67u zlzq)`&{7D9>pf1pRorpaT7#9wKeeu^Lsew0Q(|Gs7)wBhXL^kNYa+n)iA~~3@qTt= z$3%NwOtxy!))3XHIGp0|xQjvh-w`ZwZpas?OQa zF4W=vYfnai39zqQg25g&{u8DkFSq+aU8dCcy#Mwzdpb7_2PH0n|pu1Pc2lxmzg;YE5D@sfP+^4y-t@47wh=-;fSx*_U$w?3VXZ5<#R@v3d9brei0cM*IMT;risO&A0fcE`2gyYZimz5uTqU4wj;lCD=M zh#H|7IC>e6Q83_uy6nU3bR{zsLOGeDUe(i#eTzL_kHa}b`yfQh zNOEQ~71FN@+gc|(lyk^(kR26grw>#61pj{U$3j`(&C6{!zxRD^Va}E%|H8h~srrW1 z!QPW=an_c8sM?#%{cKi8+wDzE)~;r{2`3IIryeJ|$7|PDE#TFa%c03f2nYGUICM+vRG^PR6^S|(Gh=eZbJseY zj|L!?(D`rl5>NUIC@Hg}lc0Bcw_?$rR1u#{Dy~co@+0OI#(gw}D;v+*te40W4wfZ3 zwY5L)5$d{qx3S4ol5fnLkUy?KE#4eR;~j2z)}I%01eda>4%j~dV02Y6g2N1JpvWs# z>egGkS~zPJp;b9o&93%(11>@6M(s26Yp(+prii9Ox$nyltZi_x;K|8Icb7N+wu0;d z0nml1lkMlF9Kz#C3#wT55~{j#yioI8Vqc)fjt76bgnr5x7dE44G+U3W6BQ+>?4{H8 zZL^c0uJ5Xev5FdAB7Pmw>1~%a@ZtLD3#~733iZKF?&cevYBA_mmiHoIf(0ss_m2}k zU!p-Jj4w4q126}IVUehLM@FJ6kGq`{)S~hl`I?$tJL|?RTV$}JOH|r9khI-~uf`6R zM0-DG^>L?p7ZC)rvY>t+<@i6H4`wHfeB4yAa#WV(X%QMZoxA;ojv((C+~*UXso`*S zv&f-?yOLFKq{6dCA1$m&=B72qkSROd^jgao4d|-jUjc8XW~3=Qigxkx-*}07hs}&( z-bSyX%O=*zGN->? 
z=gxx6g^2^s9E1j<=DT)ss1hb7g4|C6>AKX9X-X2%)iWo0dmcdO#{Q+f#rtZLY*KXD zDIbTC8M&VNPIi>#Sqe_}@Iq~vo9|^_AD25vyAY22e&kb3AF|L$Ih!m-;ib}T{?^N* zvXOUbH~oqZVKAb@+0gq_cS1zxx?naTnyt%k8>oN86XUCWl_0Xq$JtXej?AZ+i|&QR zH8G?=peLZY`h~x*aCQpAd(%H_lssaHrtO7agFfxY@PNx8_za4o5-VZ--P+OA2J}xZ zj;D>{<9c%x;{H@vQ`kd~0fiVr*^uA;FdLmWHFZGaODEX+|D^+_JA#Oa2-EnYkk*t0 zkY_`LSHdmfh$R{gka);fHfh@yRTQ29F zO>3!v?uu6><-~OzUU-5aiHMU%q-Lg<+GWjuaz)j-{`*t9f)TXvk1SWAI&dAyC8h8D zoGX5LB@93+=C6kU3`v{398+36^?Y&>lsvj!px;aYw3z1Ig4l)fmj4g#t~=w8T`yhX zfU7Gj{l9zwjKZuubc-$OA+r`ZVWYNrlJBLhW=OiK!BbhVWGagc{!K(9k;?hxPnqTc zLGL5uFl}!u%w;#?BoJH%Ri=#f_!`^Z?^*9jFiSmS@`QvV{tUOzb3F36d9KN<+&s( zO{G81Yu1D#*WBBzo%+~9-;64o{}}CkV@R3QWVSE~HjT0`(%JRwIHfQr%NmnPsj%m< zN-WaOEBp7YrihLQ;Z-Tp!JH8*UaFhrQ1tbmpAzuRs`tg&*A$;LQMRDYZR9GaVTzH& z3aZfP7)eUi`tn5@xvdhR1R9_vUZ4n+HSa9~tkf*^eq22ez;b`Q_d`f4nTADQ?`?tU zn^ZMPg^_p!=s%1BwJ>9^$f00S#McwrG#@{5BqTv>BkD`_s&e?#qeEq$}gV+f4a? z(BLRCYKDO|fnfM7Hkk!5Czg3#AJL%)e^_AM)l9`t3dF7+DVxPK)_cEY@29Tan$lNs{A=Md<`fW ze;JxS#3|;wP23jA*wZwc?dB->s?PS%quZ-K8JqswpDqJE+zX0{#+3pwZ{p?0gl$vz z^8`L{($&k1CVPsk8+5sgUi5@6qt)N8cVk$CalNN)K{0~PxC(c(6Kbhv)(Y+_%_f`$y67{DA&-HbiFy`9bnU- zyUcmdDrYM*fea8aeMfb9Fg?UtMg9qiTuPI%*3LcBEJ|>GdH8B$Z$OPEMc6t??9G}c z4fK2N2yB5Ft$`+cD*q ztnufLMI|Z;c@P%%tVTVi3Zk8wG2>@=9gm_)t1S_oU872mkGpPEv;NxbmjNaI+tMwd zJGbU8q(;un)8{P_A?}D1=ckmUVA4QRu_tS}?F~PrXsB8zakRsQ6tdGu`3m%DJ&y?r z8)aIqE%I((cc@C|mzWDF^Mb^!Sc}~^gzDF>E1|DW?}=q$?74qF4|G*e)txJ;FBZ>D z_vUM-z5WXr`7Z@cJX8z&3a2&TsGQtwDBbHlg8v)7=_hnB_YE#?M`$H*soqv_Bdq_k zOp_yFgGATe4Vvq%VL>q=K|p`Z*I|*F7`kRdX?R*FqTl_ay*cj}7qwtB6sFp6vcMd) zPQOdOHhV>kg~M*D%WXIJ$9icB;>+_z{aALx>j2d;R+^i<2VA;&FAqZxb|NnOr!56+ zu?wS)Z}|G&LLf#Nk=NzWpdvb`?Ri+?ZU<%`-9R>Bgi7f{hEJ=#wP=_%W?Oo`&u%yro)X;8*Wk8I8kWxxGjjW-qgvLji~I=o*473%xh0Ka!)o}BXu zi(+vL!{0^#z?P9QI#Sw_&pd!zaVvsleBEW2d^k?^L1eS89L%cDjA7~aG+Rd5ktDu` zT^(@uYEx!djsD2o<@|JByg%vSof`89=Qi|ndlUu%kI@799jgy*D;6S}Ti4OmgC}}; z?Ee^rgA0baKN!B5>|Yb8TR!mwv{GoF3!5SKv+76UR4LrB<3Dr!dZ|Qtmo}YX4#n8M 
zjz3l2{y@3RU7P;?9~`S3%i0eeNI(Yo^HbueP}iB}T`m}dB74z#|9Y~K??L{3H2vTP zs;G>f-`sOnU}ZLI%98SGZ*O4i!fE42Y$5I0?%PUmF#cIdZ;>1^7Myj+X~8FRW2jEt z&w61V#NT~i%Jw=m5%&Ers5XZ8 zgyi4R1b0RoYwtd^6U|Dw3SxyN$f{cm0HSH(%(YbQD{E9WxFI&#DK-T+ZNg_UpM|yz zaQ}Kc3U}o$*;M77gqVe}YI7+y3IFsK`>&6QIYzq$p|8JcL{I#=XBhL#< z3?j(<2|3mkbU1Qs_^`2a#&NVr3<`0!XS@KxJh$I%@Mb9fX@0mne1C#!yc7(Xm8oww z{M9+GWAKc?@cgSeXwD@xrHZ%_`pl@V*E4HcfwJYL#Ny2Dx7` zuNR=S_-CL0`R|3~uAc;9rQRBOQ7;z1H*j3BiWaKaH{&~8K7%Ur0?4gr{wL4XWJs$* z`R^5PEjKf0iTnrX*-zwBFG#yi&IrF*q2%cc+?5F}Vnv)_D%>mWip1zBDd@Lz{(UaWvQF_ap__!*x1@6SQ+c6CT5jr%Aji@#zE{O~7 zH88`ClVB^JwgUBf7e66a|bkOeX8#)`=6B@U5DdIY=Rg(=va5-XBR*cIZ5Me zAdsQ(n=__x(pw1W3X}FK)^@~T=0ZEFv!?}8^acu|UErHrA-q5uq|XP%S3xg-IlKjy z@MLDpF!9ZOqyyGWP0$Lr^OSl=g@KJd%{^DY$_R}SoDp;Xon3brax&T}oD@3!+6yo= zDs!{^;ouYSr(QoiwFs=tzKj!TUdkP!r%=H85>E5v-N4?sF5$dz(CqGkVr`rEC3_vn%QAe z)Mr;(etT%(ns?Y>i8@11aP9rJMS-ED_(oxi4s>&tTLXDVGSipu*G|5-$u{qHLMc9n zO(%Hfm6dO~XOLn$mbh7x2oVu_A-{U`naJEp#P9x9Wece7nC!B@?%Wg67k1PEx!WB6 zfQ)7bwtpaD5#Dm(^!?qny|Rnn5f*!k@4@*9_|K({rpIz64rg5<;=ed#e@)JKT*5Ylg`B*=|Oat*W=!+wG^RS+>HdYPU~8ZdOdB=0n~!va*> zU!c3&{+YMe00+Y)kQ~#{k1U_x3+189E0^gB@#|p?tSL#;)&4f?0rj&aw2-G=R35T^ zhOZcxBFWJY3k4OGNn$hQgZ-LG)|jNVNe+$fdq0m~+^|zcNON|(&n6!n70&A=cC`(f zZ98oS;H_<}kxJEITqpJ`w47@?-6WcN;jXAEG#I!l{Z)4N`uqzh!qbYsDKS8j%h|oM ziI)PYWnS;(8WQ~PZiJN-WQNwfT0~I3_lWuW{)e{53{&kh*8?uceLMSZ@yJI`)&3vD^TY%^ zZMqK?mn3{e?6jC82&()KOa9B72-3#6vbY{3^s-uB!j^^x{3D@SS=sctd(DhkTbmDf zbF(9S5v^M-{W-U_I;%yRqi^)kp2L$*=>M%0Pa>+EK?QjL3J!XNdeEzvDr@$4dGK+w z0HP!s>D1qFj0JxW2`5$9I8BZnF zcWs6wP3}4dpK!X$F^x2AA#W)5K&)ZSux;rDINJ!nPR`Yhng*NlXhRidZc8Ff+Q0N0 zM@C8n0F07U`p*RuCe^dI!RYh+)F^B)WKz!os?zZAXjA~SZ|;D^3fqF#dv9$khV1T| zZ)m8SCEa0PBx45=mQj4apB3zIm8nG3u!gx(r=siYm;mq;;EA&u^^?1JbOaZ`nPUfz zamqGd#_nTL*p~uTG`{;0m3{w0E~d%taPwO04c|lOt->y`h!61X;8K;_j*jR&?03N{ ztVCB`?fj9oc}wMHTkhg?$03EWUY*t4HGnK+AxHYkhTiUNComfvST^%Z##8E_8sV?G z8%bC+ptqN+FJo66e8|^n{m;WUh?u(2u`qeO2apkQ&vBy2F*VhAox@msyV9KwNkp}= z@}h(~QYh=H{JLwYj2y1B*(G?`tVY%7)5;=WeB{vk%A%EO3Mtt#Zfq}pcnn)6i7+8$ 
z;~rW6dpSI%p4NS==O-=X!nN#ti*f11LzKI(7Ifglp05Gn%x%s!)8BC{|8t*YGcEWF zB?&mtT2?46H1&~0oCobKT+1u4(fte^-Hs|z^NTeB7kTgzS;eE;kD3*`HL>Okj{j3N zEf|Gbh*M2Y5fZ5zJ;U$`;mFEJv!=+DFNK=cHt4$?uQv9k1xb|u+D(N|Nt+d6Fjs$;(jxFPCY*g4Tj$>jB zUEHa=;tbEftvqNy|&!fXwQd1h@J&EDyD)cqdC{IIz&YCJj_-M{+{Yn684S z7z44XTbeOw5*6Dicx@F=4Us>@?Uc*Mk8b@8h$6pK+-iB3VGO|ECWbrgCAPJ4-8OTL zU>*5=aYHGZGmYqTV*v`Yj%Vg9tl%|Ar^j6?31mnja`SRWg+jbON6nBZk8A7PfPFW* z^!aI2DjjL6vlQ*8T4^&n4BfKK$rI7(&m#ra4JH5!X~=PITTk%ymd}uvz^NSEv`Kv{bR-bTymQdS2f?%@%x;xHFDNEKpA)iw*N=Q37&jWVBHg2Q3dC&xZ}%-BgaeBSATQ3X zr3EqL#u?#K(8l&I=2fe>K{7#scJ8cO-mMREeR1TeD+mqvkPgF)Wsl!A>U8!kZ(2NY z(T7L=!IeEp%aIwW?gq&xV7f);rV2?MlUEM_+uTfl0)kltDVfscCu?ehoNtU6a_My=V;g$C-;zgvXlu}1 zLd;^i4;^}-Ke$^o&4p$1 zYo<3y7mNTZkE8SAai?>La`hhjfghsocCRTgm)0Hnoy6&BcOcn9>*+je! zhW>{lft)2>c-6$;{v2tmkc5IRpCC$u3O94LY1DgRznzu4NONt{`0n?K@WOM?w!sh# z#j>Up1;*aj$I5MWO*ti`D?j1EZeqx9H1SQR`)SE?LHPRGuxS;KZWP4ldA9vXIBmeU z1U`2azyMSzdYZW$X_FdTbvA8#0^i5cjZ!q9l`mdJlE%o;2t&j(=C5`Yruprcj17BQ zyBeZzR|r4{M(y738zx(%j$4HJc^|VUtV_#}2A;AktaXuXo4uY^+|53JkdUc=kDTD2 zAIn)ap_d+7XYOGH<0_$|dU$N&;frpruVR;j_DV!Ix_aAtp5}QW@LI>}saSD#vXIrAr_Kq$c*f5LJX4ZyrhqQG@Sa7dZ33aTs)n;!DvFuf3UAG%g z@AqHp)@@oYHA*u5Me^jqxxc!fO;F5qdYsFdyjt42-0v-BPB3fI-Dx=4dtJ@lcP0;~ zyot`eY7LAH*87OGsqxBLt-Slx(UE68?q09@ZzSKK+hj4?Ed<=G%zPa^B)#&`C-B&P z;UrNYY^0Cjlz#D-suMz$UO~xjapOtjuBWk`wFrq+PS^m%!hMy;LsEml`^UdcqX_%} zo>q$9PJa(>>=77cs;8l7bJjc&k(TD?8*~vF42bXjAO;L;4v_#aB;CE~FsQAnn7 z`cKzW7XYfYzN5(2+`{8MGOmH=%VK0PFKK+?=mFCI;WfI^JRUimdw`K#WG)Y(l$`3~ znG{y~a9yw?0S_-1(aCv^k%1Ai!*jQhFyVd7n8mNwEx-HIl6hJx-8_BW_4r}$_y@lY zFmv3XA#uJi9{G<_JoVJt)`R?>r=Oz^F+<7drIzGy#NWRU8fF<^iHfufwKYaKXEq>1 zMO;R*iUtK*Lzy?@$B6vOqD+Mg*t{>5sgnMsW1!k(n@K_TTRMPD>e|(z?<2#hd+uvB zkelae^-VPsqnI=n6F=~*qlq<7fLsrdrsC~ulZNYTr9L>UF6kk%HT`i3QeskVMEjD- z1SO^!J;@+Yt(7)wAt0$SkVs?7!-m>cEMM!Bh!Kl-F z+dGIWQ*RYf5)LNt0@RadY~1nfemK9dwg6J%`Li6pfjYwPX2h%O(XwlnF5Co(K8_$c ziYRx($rbx?-w{o6x4b&r4s(^A7<^fTM+;59$5k^RZiA_(@gh|VMs}Om;qwqmV97={ zlw{+U!J9f;^BW6#q&bF0!`@}xKp)=3G*EW6lv!d 
z*20_MhWb|V@g-4}{leP&+SZYdl0|1K7`{B5Ih2+4>HMIB$A)UENv1lPoo~p~%if0F zHO!C!+=?VtIEOz!RK@^F%r_tsgtt1?upLmPeW2Eu^S=&@r z8KQ^BwO9zXIwrhKm?TUe8KH&82`-Y}`&;SXC>sIyTvxPs+hh(7aKgO}ry}$|K%2FM z2{c7l;L$jC2pL!AWJGX&J~2L~X3a_Y4YEx{18(+bJh^msjgg&5tEn9FO;I3YfCVDo z0iXOgdwow9mrM+)()_uENAdnSI6YNe^kOOhKrH-+Y~PiIh4HMMktfW%kRh)ykIdL0 zHD`tiYviSDr@)a0`TWy&nXC=M3c)F@$U8&jer1G?~A2ZvAxPnci+%6%!e%x=4 zp6Cl^4I2EC?4!eh-yUT(6q;QwnUtX7Y2$z=LwOE(BWA`cj1JdKD+g{Y|CnNykW2f$ zbTHEEf0GtG?eVm^ayI+z`mErU<39!8D|;XDH9;{7vcFw@x>_erbmbVyfu>A_Gxw2)C2f4p(r}-s( zrmjx)-y9;oAm=)BnRhKAqJjUB)58`|*&k4T$D$z}dO- zN$_X|HC~%oDou{-V8{K6^*jl!Z*OdreDtFAp0V%ic~d0@u{(zL8t<;NnL(^bwkYWWN+a5mwH)yJiyaVDZb80*J?HU5WC{B`_3_;O(&jL+d_o9I>9*9&aOjv*@+qIYpt7b^MyjMhBZ(4+?4{d)Q9 zEuAggO`Ww{Sie&#@9=OrUgs~mPd(SRfSwQNyytix0+x@<|7~|$ z@YSb9Rh=4zI9-TS3%4p<&z(kqtuDkJh+|}A5=3VuT@7avJ*Nrga*KZJ_LlT2c z*RithW1Y0ei1aeR^`zga&bqcbNfOyTf5Q@oHIE1je!N9?clknu3o99OcM(7W~mEnN-N&Jx;gusff* z%pKJvcKHKWNo_dlgHxFd%0O~q|1Hk+8pU5Xysj0 ze~4@TR(|JOCqnf0c+9LB1S>_CITh&trsPBH(XmJC+erY^Q*t+1@kFgbqa|sY>SJPrg44LB9IBJB6A`4BEl0t!sA8hb`_F8sS;77V4+G_`Ol%!48 z4-YK*2tyUWQQ%NroK)WU;qpq%XYHl`Y!gSskgO_X`^!e=E=Vwl;b?!cM~$!2il1N( z0Q=%MU7fe&{_*u*KXJWp2uVRYB9SSasb7_#nyu0sYttiIpG_Fj0zTG=zl~AGl240f{pt`Fuxa}oOHANA z|GFS4gyr4UHh)&{VDD(y^P=Kr!ybWW=*P#y-b?ipMzD{WYaq|DY!`ByXR2s}5RC|B zp`EeNA-ly5-W0jeFr*5<8`X@o7WU)V>UDT1XLFb!RXI=-E|mfhMWM`EiwV=2b?o5C ztNN`_jiCQax1rF%h*ie#YJhtwyG+HR*;OaePkD&YNkfx85>qB>kb(SRq_nN*ilPF? 
zB38F%=W0_aWcNI7TJif7d7dnuRbz*-aZg@`CK~C|Ub&bVd{sG&xENAJP1Tu$gNBCd z-L|(k<;0Z`L>JC=|8n9LRmo{0`m|l_86wadx!ZmwN73A3?u>d|6>f~=07*`3>@GtK zIGbelXlbx`Zd-Obbj|8yn|2Cs745lENlu$OcwRb?sicz&%AqUd7P$!DHcy>XJ{Rf= z2!b;U2%2x(h+Z9jaN3|>5IJ@17@0*e^XVunzN_o~s9bmSyBMnx-Drnpm)QzM)A=uF zBT7;!8ysi;Zc=c6Y9bo?cXMds;GT!cvYEicrC;o?l+(lW{XN75*$8*+SKiqfQ~OZ6 z+3Ej64vq5l1i%fAQ_|W~%dV0lZv|1oLQUf!s**zqk$x+|<>7(g#RXrmzF<|o5RF4->*D765y=HR=TILOm2_evdK+Og<@^ZfkJgz+Eu-Wn>%+y^p1RE|2Lq!e+ z-@G9Xp>#Gi8ug6H@AGc?ZPnrWR35)qif*_t+A%2KdkMyF5fOM2WiTL6#mJ<3Uv1KE zPPMoSKCCg+uVnbG@|CUgCf_E)>~VNa-wM+bW*(369RQ0+7Oib^-8@b0`)cZ zJsRuQJyvUEi)&AwPh1~vBXD{5o8nC^ba+aKJPo!x{~+h%-?$m?ld8!r$XStmpSW#p z$szd%e7A-}8cGtwI&*MBio7j^jJ`swVUy_;*_;hVO}j+2z?&;M~hBC!mwhvVJbZc2R;SmdK+dLqO z!sZlxNv&!8G;=P=sqB6zZ_uar)sP;Y?~A)vN0}u!&4Q5zGro!q`y8VuTss`+?eU5w zIW|wzzv0YlOUoC63NeU4V{ccWszaq_US1EbCG>>vScbc`>ni&5vvY}K7P5!9@a98K zRM$;JKo(j&bWg7h0upn_#AUGHB2^GlK;a&iS-igX7G70s>D(%4n3_rTba`taxj9b` zVx~!SWR1Ju6oBJeoA&vPO+0>4O2Mh1Y5Cq_aNE}A*~&G+QYZW?OMGqPMp_jwW1&0nPKm*a=F&l- zAa0qTI}Q5am8MHt8^fnr6JFDAocf8eBj=~@r~WpvGd38_PnA&{jAMUNMmCbIZ&RGe zQ#~;i_RR7!R{BXY#9Qz~d%NX53~8_x;GnK+`2=VA$JK!xQ2Hko%PUBak{sf+vPN)H zc9=n`G)dN>aZVT1mC*5|{^xGot0UGgAO`7%6Sk_cab5s0dWg%L3_9r4k#!r~n5z^k zZ{&cON?G!sgSU|suO+QEN2QMTUau{EA*e&#iJFo1X)zm&XR?X45pfy z)JhjJFer%7AcOuM2A)>#b7KePJwf+^BLhGq7nV@I$rt2dN0m+_@^Q~%U!C-u#WJ>$ zBSlAd%kvVqvQGl~vQ`J()59JREwFaPz=JGphW2uOva!^6M0SY$!1Mk8G4_{XRW{!n zFwCteAc}NLY`VKkwv>cOcY}0yOLuomOZTQhKsHhv=>}=(?#<@8yno*R_xfvs5NEJd5NT>THejXWA!7!UK$PgYR`sRgLV-f;~X zHn4hA$;hw73mDfSUt^`rGVF?!ceB^@oW}FhbdUFcF3)%%Jp8GP=pYj&&T=cWf#8cj zax$L{X1jo8D{iPctKaU>RmXbeVRg6g5Z1zf+aPv(YhkY_tuk3SSXP!HAn|pd|A(31 zgSNd8^qKqe!?a@_X3vlM9CH7Y-DcLDeLL$`pY89(Dg!oYRW{!TN_NCNPxv$B4~tg% zzcOMMppvBa>!#tpQlU!IN{tB*|4rHL5G$q>e_kFLUj8H0Z0BWCGNH->x(SHTK6W}Y zR;0UQWdZXZF}!-Sp9cpe4cnTFy-1}B>&uTX?^h}7Pf4MGE@Pv<&l7E2;9f;&+6ZDfq5^r@;Sx7i6^9C_96HW&i7T<1_;SndyS0F7|E2 zZqT>QOfP&7N!QhMQjmMG%dFWu)cJL-olW24c8TGA!7$0`bWCs`X=re;jI*!2+SWtE 
zK6>*Q{qAhKz{SX~Xr?x=(@hHen{$-DSTm#DFXcaa{fAW87$Ryt?*k#Tjc$P^&NA)=z!;KxQvVn^}L&|osc-<3hw$YJlhBa9x@AH^Vt$M)5wPV3Udo|Loi@^QD<*74(|?XX#k58WSX zT_oWMK!|xqS7l0rE!W=K4v;Op*@XEHm=*ob?Q3U9E81Ql4*qCHeRf!}in@*z8_f8= zyuhI+?#iW!kI&ZDR?lA;PGhIup`!)Vo6i7L(#K7F(|j}P&EKusb#$_-zzGrajcAliZJ&L5k+7 z8i)sSguaH#Lo;jM{kS*zj=N5}%Mc+BSQBy|*9~`gnC!s>(C6rR30JbZNt_^Evj=)ZpoJkvyFw%I0YJLe=9kP1)s`1UK#Ze3&} z3lwf37i=d^g(9@LKuiKSy#Q03Qbwq^(6&%1STrel&QifaA|xgu4-)xG0hW7kcw}oE zt1dMuu;JqWI-$tBFe%l@sBJ(0X^D%QvLSZk*5@#-o^Y=W11Cz-S+q0z>(F2a-6t?OG$}4Z%s>BoOapciTzK@VD^|3Pa| z@+v)kcfP+{Kg!!&PQg{qx3aydtVwice(59LtH;eCMN(*xTs7toM?FA;`^Qo7>Pr8V zPzbVHI?07=ZR0mb-5!C<546O%LZX|GY#tiA^(ip;$+Cg1S%d#2%Icc~3-4WcMzdI_ zc$w;_sec8*ensF;|8r$8NTUVp!1<%p?cM(3IJ;#I=BNXv)4+$_OkU?b>k4Yw^9IES z?>A|Vu@ZV}YI4!6)n%x0KCKa+`ph7?=<~AO*0K!G_iCy4D!D=5p6T^B@VFWdV%EbGQ1yo%(iXnGP58`krddMWvYVv zAiMzNd{ZC0wWHt2PV+i>OYrmK4#22>I~cmFLWz6NE`*CF;UH+T`SM*NU}Ito@z%Rs zt7wG2x69%B!PN@b*w=MawkpgO8@Z=i{T6cJxcXR?nwO>OFQ~CPGG%=448PLH;-@DL z)5Q&8w+itld?@+~;loo|Bg6vnCk)Duu;!BdKDM>h*Vo;%viVvWzC=pAC{OJkvuMcq zbG-WfSq<6pBmC+q_4{z+y%0`Nk#$>jy>dV#xff`pGh-1n)r zp5xYm+?rtrOG0KXa%e0=5nB(cDrS-;;)DC^hmP?ZmSd&(!NDEhb9~tI;Aew{*!}KO z(1t;m`PAba?t>h#kl2qf?)Eb!6G=~Qo`%FZ8_RH42)6=^?aCEGJ)yG^B^SuWc$f)_ zeR!4%+5aF9lb1Jnul6p}>+&K(jt)if@^XoYj65fuHgmfs25-MnQx-AN!3Sh>8WSeyjrxpDY(#B2NW0M)=Bl7%HqSoN`6dXjwrDg7$u;qwwaB!s=WXLvq(a3 z@tLGabOfVLESD&U$cInVYOnSe0Dc)ge!tCw1O#x4P~mcxQ0Z*YKbcZjr@$J&T+cSu zMXdx_Cxu27%+D?Rg3yOQaoP&lQ5RTeT502-k+Y3zPmOY=RkSH+i?arVojn@)+;j+8Gd zIwXva=)Cwjcsd(L=C0)5j}TRa2ajQ31an`GZ*Cr}M>5@{`6-VD!I^>$AD=I83FnMH zoF$R?+Oh;KA0l3nFJOE~+CMNfyU-z}`?cKMc~&19Oo|(IzOwA)au(!9DhiZfL#6{b>!WF`d{gS})$ceS=IQdP?7wp?lWd|q?N8qv!#~Pl zVQxx6G{=b@A6r;g#D(y@!Yj#jfGW>Ne=BIpkNtSi-IN!<ZPg&k%d17_ z$Jn8#rdEDNSY<hAl~+hTdvS;7jU z1DkHw>->*u$E6Sv598V<%2XI3pfM_w7>GqJzY8DQn_3^*4aj9Oj~!)Z*>Musq-(&6 zrVIcSOjTO5Erp8M`xgvW!r!kUUaFk+#IysPT2J*3SKIG;SK$|Fkc}In^4+^T= znq7$KH)~`YuCdh8Z5eHveqHPT7>U`e$3xoYbk-sf<T90Tox=){r9ic;di1Jd{Dz zNvYP!drRMi#g_75VQ?==nc>zdKko-bzWB1Z${Bkfsz$AcD+00Y+QWaTNVoAw@-kbG 
zSv#^1c1pnaA917B({&uG>4!1`^wpGrn-g@Soh#yYb;cW zADwk&X=9#%z%BJ+-M}{fn?8elz2rm%2YtL4Q>PMZZA2O>O2ad5$rOgVHv_dS{*smc z9=g$EYWBu@+SaP?9jvx5-X4$VLKKhGtE*~EB#lb&$}0JnXAk||WlmA>8%x`Z%St<4d2(CG zaSfTo?nTlOwfEBfD+Vp}VQx^7m0xano%5_Ng2G`_0422w&bGukYYd+rfX&fr!&Xd89n+ghT|}b zU@X*CY|t~q9&aa`1B%I*veI>96H($U@F_D~_O?MXytd#6b`$AUkR>Kveh{MZosg{rH8zgmm8m!rV_kGqf zYG_g~3t!u<9NOFO0@^CWMVkVX$^4 zNw@2ku}P(1U5XN(S~&J`e~6)c^4MmF?-R{skvGz93+rW7`>&;IePo7%o|@~&0U~+rL$vx-DlOyCZLHV@HWS0oc~*<%y2mr9>J=H^rt)oHjJ2f+8IArOP;M zf~KW|--m(|EAV~8-4IY&^`Ve5ipz>G@eTry4kfYWbdTn1PJ z5i8Z-<$z*i|Kx^_-`DqSdfVcNHqJOddP;~9hQb)&YU+!+CS!AUw|zhZEa@lDQAa(n z42=Egm7)vpOt*s*yS28v6kYl-EJxJw^5FfjBSV60Hlz4!t zv3+CZYW9X1b2DC;lIkC>05d){?DQ5;_$J2J|Y z=(2Y(SYaMVMSB!0PmNP{IJo(}-7xbQ5G`-YKQc8)1If)EiRwn%D+lkDGmepyqgIr= z4=#%q2BL?!AD#QG3rBo{OmyE|+yQrxOCb7{_Mu9@-7)Mpad7t^Q*eXr*4rDJX{9%- z3q@&f`*4A+uZsaSu)gc5TC^rPHB9}57|all-HI=fa+GKn5SYdNmSis!3vuz~Z zZo$|9r{TQ^cx!R&4ceR9y4IHUPJ5{Z16R6T`H06A-|Rj#@aaDwhH0f_CRMjjbm9h9 zTK!0Hw1s^nsVyySH^}`Eh-Uo`hoXw6=EDAZ(tu!QEZ|g%RjgZYiOp6 z@UG;?dDQT+LHBv7eaA%(6G1cQ{Qfl=S_F0k?PNx`|Ng^q*A3WTdOzXn)8rX(%|{Re zHnxS6-=LYzShO)Xe1Fc~-;IfMVsUT3y=(C;W=U{K1N8BV?=c-lbQ=+T=c_fhJCf$Z|B0$K<-_Rsuir9wL;jd@*@x=MtUzHu9}iiTg?#G+|f-} z8Db|)yevZvAF2x@udk+88;`FZ3L~Z1X$~6z!W+#~{Rp^FjlhVSnHf*l?NupXm+LLq zwJ{L4G!YxDF}-J@siLY%I)Prq=%gk&MUIBMDEGUpJX>f2=f!TG7-N0Qr?U4CcXM0oQ+KL!QfH*(%1-{I2-c+QLw~C3o zb<`F=tFyrM3OMzve=Q#$&fpQ=@F2+FL_IaHPZy}e(c1O z8DY5d7vk=h8|xgIzYeVyBa=$DnMLWX=jIf-M#QuOQQzw-rk$E>0b1n9z5Imv`K_|~ zG`8&ZuNRY;FahOFk~pb7>vEuOq|L)=lo1SHk@&ok1W<)5nFh91p)T6EPNuudpZ ziuk$JbhDVZB3le`V{Hv~nDxCs&(f9=SeU*Q^F5Bqw!{X50gv~p^qSn)<67Kt$Zkzc zizH9yW^IwY({FdFEZ}HEBqw#_#j$XLsa6}p!D4KlWTFws2~=;2Ngo+FNz;9*UiQA( z9!~Q=*-LTZy?3y2nl+L5v->~OH8ec;aB>u|)AH* zTqe((!u4cv{FRFG;^>)l$qr(8Y!x5}tf`6)ReffZ$P;}jv3K-C!mpKTwY9xB*wkrz zwfnN?J6u`k-8);ebo68y^KFe8J7F@s>?GfQzX%CwT66rF4T4y*va0IJh)@Pf9RU+d z5rm=7MC}`0ZXzcaJ8>T`6EDlwmITd&8ZlxeQwDWCNwfsjem7lk$Ku0n@$}#36AQCo~_-G=Y-Zl&Y-sYMskh|zUquVMo?rx4J>E|`bKnh@Q3Umvd3q41G>x*oAF 
zL=I!EapmQc;%2cXwjQin8=P;QRtp}ySRYar4!EkCB+1cvoA>4Pwm(Q`W$B}PWsNse zTI;QcslVtKCnGV0XsH_h}>mpTLBG1xYl~>nL{TkVr zAzRq1>Q5~fM_c-8k~AYny5I5}YOc1X=l()WMsZ4x!0?Ezl@0&OWQy!ghp5=s>W1dq zjfjj&BQFE|Xe>$<^kK1XHxnx(qW9P!4_mI=t0o-)&SYVr%=&#CI+MD^pDg1ZI`H%8 zHR?0DKC|1NIa6NqM&2n6X(~zuJi@J)CT3io<`y<4T;h>k5{Q@jmMpn9?RlNvyHlsz zZJD(tC7g1v=}JloQc(njFg&i-{j-W9GWs8n>BImeT}`AEjVwcd1C#aFcp;?W3HZd> zDgwTI9`q1XM=Y7TUUrd_aM~onDDyVwk_)=^`eXR{07`GB+xhA5p|b@Yc<`3C3S*cL)NnSKy=n}3- z2@1Ha2_Mf8bU7?BG1Jp)@zsopmh)vcb~c|7J>OkwPy7^zDgkKByi9SdoBNCeh}T(z zFNu7m!`tmk9+~Dh6spaZ1HevscvwMNt9gscl7E`gn%oXEy&%VV+#Q%r!Q@>nKv5+YQ27nKbRRIp~$ z)KHUoMz%cQu#=nZPl@#Hz%HNg;Jj=W^Suti2^y`-+_U)Z>dBdvk!{e%C%Co~xRR32 z>@m2UKR!R*)DYC!gomd#jtaG4G9O{75h2YR(wJcF*k1eY!k9*qr88&8at#CB_AS0f z?j3iKQBfn^ETvy8{Ik80ij8~@j8FL1_$|yg8>Xt-naPtQX9JZ8%z~{))Jh|iT<81Fk zqJw51kCmO$K1ZZ;)Y=r$M3vabn?`hinwfhJ*q`t%ddgv&oM#sb7hJD;@m{<+t}co_<%}8u#6g z5B6Lz#47oNv7cx0lF$)pp^?|U(zKyd*EJsglduc;bE!416dAuB5$N|CO?4uSM>hYn zPEE6Ep#FTNGwj`#x;6UjC5%Z@oky)C@sJIY z)Fm)~o-5z95Pxj%ow=jqXMYyzx1_~ZBG3?}Km|t5^g)fvG*F%T4lBB@`k6`9ZL$WT zik^_i*p$C}pW>2a=T8FiT2ukeLHSPmNixH|wS=&pw?@k)k^Pd)fZ=%w^8H@RO85ZD-5_cWp`PB~1;X_6|2^i)bMP z={Bbv@--E5k^{X3lGurv_(=R{M!F#bDMs6e7oIg+_xSop7e&pJ%HqkZXG0b(9O}Tu z!^_(vTZmO{c&2;4*%@!&t2KuJ{W(irwEUtJpatM1QEcz@ zGZpTi>Vn%~;JN7HapvguuolJdFIl*bFT-k2`c}r4Q%% z$}Prqnz&u}=I2?mpGk`>t3$+(10RNq>FjTNgjVnMLE#Wo!OE?U5laoYoA|Iw#?m9= zT5Q4c)`+~=-=fkZtmErLFWXa3yi@s%V6XQ);86u7UlpW<`WtW)OP6_c>{;=jPo{%@h z>W{1FMk}@pdoaTlKRy&lxbo;dKD!#Y-ZYmRSBn*1>-T@8Y2K9f`BvTaolE@Bj{I!A z zp4px{NzCV1Lnq_lAEqY|-*GmjT3>c@WD{yE5Y}Lg{5>w|0%0`A3as(k<#zBD;v|LX zLC+RfCH97{ncTj+m6^F_^UvL6-jP}yTGH<@Im)aAvy>|%6vzI7aOtjEF}4v~;fbU!2gn-d29{muEGCx(CjA89NE`+e=%r5L)U!D~$h4Tuja zt|#(;jiTrAkZ__~F|#?o3t&IN3PjBhK2biknMW@7%Nbtvu~hi4mtW|>CJMnEajON* zs;h@`t0huT)5|b1p#Fx-x=R-D36`VhLZuQKy7-h=0PV(9O!l2kws*}!FcP8-}( zoh4&SsRhnHKEU&)fd6~I0ezet&`l$(65P+ul;QX6}mLG)|9Q z0Pv-CzoPuEm=J=NpBVO0mf`{L-=_m_#r);`ElF{(vcB$;3`Ib^1OvAITkAReUJq|a zwkzv59MIN{@Z~Qn z4?jcSVeG@2bessqQluW(-cGM^Dl3i(adsATW-yp^?BKLK! 
z;HW49*#biTF25xF6$R70ckZfO7Rbjh`7r&S{8km@;udIX!%{SB01CK$xj0&D>E*c9 z)$~-95(c+sc-I?Gu>b4sSB08aF{}GBPG=|Jgz~WL^pP|JF%F5uNNE=iXzB*0Yy>k7 z7>H$z1-Sygi4=%Iu8~1qjyj`{g8l!#M&AmcW;^nxxw~OwXg7(~yt~cU-OV#ZhDQC| zI67o*km&Fsg2Q=U8`L8Z7wRH;W&DXg{J*XFORJU%2};N{uxn-SxeK=p{Aqqo7Ci>L zH|bVvWDCHgovXKng@BlnKW<9)cbAp?IqClWDcaEi?VJ}at?j%nd$mGj`N&?0WM+e~ zmsxMyANxJ7715x9=N9x5XoK5MvHKKYLu~H9O+TB9i=sCYJP3YJG3nbU81o!I?ps@Y z*Qb9oGzV}VX?$biCje7`A^tx|OX~{?IMkyUK5W-RY>%}B{>|?F>BdLx>>qY>W_6wW zyZ#1B1@fGXH<=j`5+KwKIO(SVOex4AKWiVd^FUQig=I}mp0+kDvt)x4)(U!`5czux zXMl++L6ugBqxnRQ{NF6ZpFV3%N-<7OkE$ab<{Wc0)wZtnI|3sJ9*q6b*apH;UA#S9 zeB5G{GtkK<8iz|2a}SPnbB_dAGAd@l!Ec{nB@P0DYSykiJUKBj!I;4bhS`pFjSxq7 z3sW~&ykSm>e*>d`Mwk1Bki4A`u8?5%zJQ**@wvZYfZHD7eC$3x3tB1-L!~7U&|@5a zk-}M$@Wa%(#8QX0WWxTQ|4-9Dzt2wreR(jd%SrlSF1032^FTW06O;d6=R891n&#Nt zE;ehJSI7uLqt634{?`sL715N6W^9;e|2HG3NY4MA0Z1NDaissg%CA5xBq_)D14`1@smo0$LGA&qxdfHD64bf3PxpMJsNoyCOG=P_Mdkq zXDN`j!6&*fbe}qM1a>s;GZXm!f%D^VKh8i%4^ub3QW0h#5iO&i{ujpdZ?$6oYxm2u zkqL>FXko1{u=Alt@PbAW^?ux5V)jEee#(x(*YZP}GCSwV?mbk8X^}AIvZnltV>mBcJ@8-U;sEi_Zc( z`Yz!d3uF|Rg(6_f){i(QBShy0k}vXW$wA6@*009xa0)m{5kVTPa;ocK`ufJUR)OYv zD`I?vGS*j2aLN|!3SSoO?6quSk*Oe8GsMv%sB@N4R)HH7JSSt@nk|-N0$+cjbR)j; zD-c~xL%-dlt)gt)f%I_$%lcHpi75v2!4`u5cSO0*`v0l@sa93pR$g7w1}b8z&*q3W z6$F^eQU^`tFya9{@O>$&gwVb6h4RR9nW<0ADX@{6D!Y;iMe90-oh29DS$45DS9@os zQ9{L?ml7vwgou7ZbQn|bU7qjGw1td2kqHJM%)D~436ui`@H=Rx{5e!xTTj`#9?B2v zA0O%L`&Afp5#!(NlO-5vf*oN)%wbpBAqkd?Vh_K$)KaO89#R@gL2Dmd5C0yvzufiz zBmx_GrVIo;K|qv^MivWSh9X*Tn^t8v>m5~LgR^%S3CB7s5bXCn9U0toaagE`t)qkT@{KmB!vN=v_sd6wgD+xy})R`UHL!NxNFIksW|7dum^l*Q3kk11+_OH81kguj(! 
z^DGN@%mTXAOh@SKf9nW9^dqQ5HqhlX9iq1km)bVv=!;1ywF(gBhT;By=P_23!|+pQ zengtjJ`OgTmCB+!tN12*M_ql@V-2ilJ9@hz4Iob;eE8Y>nQouH7-%G~4VEdA3voM4Y4KAqxo@;w~(Q# zsKX58h-Ehh&q1uUQ#ben9q}uBSQ{<Zkh zPrrt!Ja`IuG2!S)79$1DEGc6&VKyNgl)%Oc+^ud@%ytf_yS8WT_;FWzJ5~-FPL2da ztOJ5Ag-~AZaW3XZ>Zk362$~l`Cc-`Rbl{AEj{Mj?sAlp>S`e~{3S%X$RL=RO zPn=skS-8(99+vdl%3Hucg6R{3ImQ8MO*9orEU;m;!eX5*Ny@GpbFR9R-QR6}qrE`% z4^(n6FUV1ST{*_(;SZIqSIEtmvPpJS@dqKu-ZK9$685rd-1G88D98eYj zvP(y8 z0wRUj-1pWY3YT+`sc1DNCdFD@kI(PpBAC;Te4+zo$p15HiWC2IxLWfZ$$nY1Dlr*)pg`!6M_Hw0OXUA78wOz!|#sTMQS8m1W-tBA1#o0c;%@ESg zMEpvzcY$zcmNb#B0^shis9{+o#;a{kycsJC;bIi33|PCyq|BVGVyFji!a#DFB^

z3RP8=GUlB1ss%tP{?%5Lxk0-}2`8zRrhz}dXz>qOKwg25EeA*d+yddh4|ZmOyFX

$Hqpdl?FnoVg2U0>LbaCpk;`|Q|xQZpDi#_7_MwGbkCrszpn3K5J zvW5NJz8EB$_GE1FH^?#NiFsuY5(!PZ=RFA+Mz)_bkgBlS1E_RotIn*o1s#|3RR(^3i%o||SCGfk(OFyQ72SGou*2}) zq1EFyx;aCNAAF5&_DIa_;aQvOG(gh35LJ5bojS|6dbjD-HCKNf#{MSF+3#{B!vEoS zgHD_@CMBiXC9mj&_wMF+d*KGiBB)$6H=8Io6DXbpBbMFuMdnw}ugsf8yFA0_t=Ko2 zg1_CXV;t{+3IFpug$IUa&Uy&Z2X-y3cP*^yN%s*qMR{=mElvS_c|l9H(A?y0Z=K10 zR>VM(iVAvEcwx^GjL)D$8S4f5tS{s6=ZA4EW=ys>-M7WQ0JjC8X#-W!dAv*`A7Noe z6GOox0GSht-T$6M*-(*59h#znt|XADA`w2b4$bH`)I^QtVATtWa8^*O&K1GJ*qtTE z@b!3r3=>|h8rq5=Cgc8;3$f*p0x6*N>Zo(+P*m*5qNO((EuN1FC)e2uw!R3D?`}(b zkuql7%A`Nf0(;vGYu+!42^W#0MgMnG%(X6-{v1@+QMa%lHxMTqtWQp+rK{JMjEo+H zwG+u~1Xk4oh8=ignKxBay7wWk`+~dJwvTxC4dsL4mR|6z$K4IVahuvB6xv!DEC#K{S##Q=gfq@ zLE6I5ijQz)hhat(*l#=J@&-_R-0fLu+bh5HDEPgVjtO`0Ay$bD0%$vv2RE@ZTr|}R z+j3>>Y~%Lt>e{aLktt)J^=zV3li=JiR1g1+8GNqE;Y8OWiVE#x%M(+j8(ffaK*!DXm*4KQF!+(U$EpJpG(5^eMuTVCb*cNVZ*?P_ zN*vB|-qS+Mp!q8ndI#Lz4xeKBDV8`1)aexyPz|_d3|neWs1Ft;sZgG z|Lr`F>cdx`A849%Ri*)Wk8PEGdV1FV^zwbSb6^xyn8TQvo;nzRmapaTqr$6_U&dOQ zdU`@*aUqxS%!7)yuh{3&USIQSplYQ8z*XIFRa&|h3S9qyc%2)7WBDWX;3>jkp6%^O zAEii9c~7!lVPyW=x5}qThkSHg5CRV0rlA()65x zSOXZkypApsVz2sBE??U%!?s_7cULH)vHR+sN@CDx+e1T@{92hoG8Ef%Z~b1_e!m6v z-}~br2gL5%l|a}_)W$6^iBmmXTYaMr1!fd`6(I-YdJzUj*@N5lC6xCSg?ca|07>*= zjePcA8h2;C7cu7+AK|6Q&4eswfvky)bw;qe!ggcvcT`Az1Ar@*jvoDUnyWPK+O5W( zR3=V?o+ zXe;baHgt6;U+^QbS%e`+wVWRXAD{F#D)b=_ zx4^nvScx-fa67uTns0iVVzxN>IYS{DG}(iHQKxb?YVDjRw!iC|*|Wt@)(VORO_V@e zag?EZ@sjG>|3?3LXZ`~cb9{@>-eyGZQSs%?SdDKNq>Zt~fhB4YquKVDJooiGG};k} z#=2wLIZy{m25q;LbMR$obz!bgei`$F)SWRMk*XXJ%y* z8}q!X(_CBSNNdP8!dj;*0#RqcAiPuk`ZX(_?X7?Tb{6uKM2s zI2}934B3g55=;P2&;^;-&(^(-`rhdDF=ThZbEFjGMQbLHV{ni`N_%nuHB zS+1`N+t@I1KKC#vx}!l0#*)^YW5tQ>W0x{L{=wPz+kCd#IT-fx;^EhH+DePdiqo5n z#P|}<*rS*aDWn!OggU7YQPz9_u5LQuY9=6-7uH%b)O~Yvr<1olv}3gmZ)zyjZ+GD} zPbX9XCme(nu65rP@;xl+6fj~V*1Yq4E(Uj~v86a;=n@|tq@-a^()OnC1Lu=SSamMn##Q6yRJQnWUU&~y+r{~5aIEmL3yCZ&R&9CbYT zA$mZ^8yG8mAZC<_E|zz8R_e00M3%&jGlypFSfBgj0ExiGX4)otp*m)~vHsCVZB)}mnM80tknQGt8O)JTT$+N&a9+^` 
z632*Ja!h%yR?;3*s)V}%cTt(535f-v-e`{P)EL4-N)hY{+X0Sxfn!wM-Opg@l?uXl zD{JS!;^RRkS}Ouli>H23&F89XB6^>3pD_tZ#&y1Nie*MODwhVAgGf>jFJEq~8pW%IQJjtGS^R$z06$ z<2Ns9^haprxHDkvP+4dL7`2>%&Hk-$$F3H?}#R?U;Y;s{JirMhI&%dNd=#Jvg&&#v_fVq!W@Q!F+?oZ z`4KM1R8Dp|-6-LpN?hQ-J%Kd#4E6KhfB}*)!T*zft79vqu&|3NprExLQvldDDHQ=Pv9`o?9afh5n4FXI5kye=SPGX(`>_saE) zWuFJR`&tCUn{Y_SRidB&Ne0aBI~+Ywb&Me?DGnad1fyd4yj{AF?s$!g^Iot;Ef^El z>Ogw@iiQ}z^H0@~{c&5k<1q_8WilA^@4fG)ZSj9usCa5MDNtMaN1(JnhvOwz+PnG= zrc}JZkL5DBTkk;rUmU!n>rPaL9sSb+X(@vpbj2u&6qy=#q8*3Tsn-60-pZ0cPBtcH z*tjvp;CepdAb%nZpC}(10tyoG`puZF2}vSwjRBmezxlCP_lDq^up&RGTH%=MAL!E*n};HV-a=Wv^A3AftVyxYSccvn`rW=NVrcd9=Z?ttU#FbEYfrISpObeJ!yf3No|7P24G^!_xe5V`*w=g4D34!(L`*tD3>KOr9wXu z*j>HG!z%2WbSv`l`dN9gPxLv6hI)Yr=#C~d|v`>w&;~2ef~H^d2yE# z#loW&@Hx#mb7*T|2ftd3^n36efs1q+1;ao=oEDZiw1tMwxgY%9^y z;6g6+$btiRDdtb}+`JY%`OvTVmU>8A+D-GT=*r)cw6OKpK)1Yp_S zvFa|L`p$WZbi+(0JhXhf(!wsYdU?lZ0x^nzzA@W z#BI5krj^-1epZ&AappqBQyWf*^%y3gjEL-_?bVYP#n@2(-_BvaU2B6GKKZDv1PCk6 z|NG!#IuN}tGy$tWHgf@j|LW>uCr`CWY9-;78L#rf;;yzm8}pqQ!~jw{=gSkgtkXH4 zf#0hudU_-@$~4968Ffio0{rvU9ynmhto=xO-&9Xa^P;Sgi5(QiR+H**)Twv146we) zzhnbHp>q&^-Hi0ha)Koe@YPE`BMhY+RR8x3X4LK8j@o>fbVYhAOzL&N)pZ4JYlh|N>&vlLSEl&pQzC~to{j+3 zt=_y1{|{AP85QNb?mhHKInvUjARS6~i*ycM(%s#Sbax|-v~+{CFd$t5QqtY^KKuOl z+55a}@qrJs9v)`h*L~G5ZeLcf`f_uMARO%5`rQ{fsgLK(51HBu5`SP(G$~Aade}?G zB^x8zJuqvch=Ma0p7Z>0Xjr!wllxYT@wUA75G-?Q&{wZtG#=>QPkDlHZU&yBn7>%lU4HK#hy@ zrM*yb63#CT6>rX8> z(EM%kUT&Iv;fKb#)3Pd8O@dkx!yXW{dmREN+oz>PuS;rQhh3wsak=++NdQS?c@squti*B2)x7FoYb9p0(v4e%et5|4)% zx&}?OYBci_&tg7K-XL#sgF*d;U(2c*n5!&4P9IxUR-~ut3M1Mzxm;<4&26;L2z@v+ zmbT}?LQ;`_B?^Jkp9I%T=>PXJ0=S`wHtJIg)S67`{Z6C}-r2CxoyI>`?Mn(oL_}@N zNAAG<)7^pNb16NI6ddaRDp-6L z#8iZuFKGrJJ0CD**5~o*-Adrl4FW~*=c*@F9}7L*^?-iw6w4xT&raWr7j?262`V}t z+Wq#eWG0w=9|}1mopXDbf4<6nR+{;F+IjnHA#N`61uC9_>~h^kbfg(m8{IlYUr~C4 z4I|_8d^C==zkuHQk}ChV&k~6$qY#_eR^{bXqWOL2^mAb;C75-;6@>!hYs^Y%i7g8* zO!+l3Y!xDgR_q!Cp2_x)U{09&IVo3H#!o^RgUoAulhec0dr+VR%z-C+>~t@@Oq2G7 
z1{WfNK0m<-;q>G#k8n+uc{?pc)V|g@!cpp`aW4K}S?r(a^))}B5Bk1ihYT7zg;o=a zYADaB3ch^Bt2mhvI5A=MX5E}Nje!7gFV3UnFOofZl{O|Kp%UA~84-BXA2g`qB!d_m z)6_(6XLHA8*L6kVcR$z?Lakz;y*G)@FL?U2buJ;%a%y1U=zxMap{(!JShBG)y7MAm z|F=)`>g!Bp%Lvz!jnw1zqqt4@5&!bgHYKPwud6aYXM4+UGAeHd3}uHz%p=R zO#5|Ld>1K@4bI=~Pu8jW(%j)>8>0mq8vw79on;b^e|&h-VHT>bF&<=Gy2q9;iGnQu zH0IC^Bb8LWOzy+_&->Ya;$|3>=u=iX$udwzt@8`Gz1e>*(mDZ6%}1 z&iG74W)uTMMiDRf-t#|kk2&xX;i*PQm>~v8Hym^Q@B7&>WV$2V1WpV@=~&P&VefNx z)Dtt)b;upT{B9kjS_HB3VdoWIv4XdI1ZzD0r$ZO~&%a4%7PZD*8<;A;Y4hUZ^qt(c z6DpAR1?#ju-%Mq?9lk@A9Eha89K$fXi*XYq%^bpu0QY1I)cq`JuB_rzr+>QF_p51d zN2RUph_kXZ%wV#Dy7PZ9bsg%)lEX@cXVI4Jb*$Ga^CG$bCZp%Ax*;Fezx)hTt*fK>ES|VpLLk{ zbNEBBKhZoiJQxQ#UnK<=7V7@HF&$U*;M-_>%gd zM>4QDe7b>m-5bBJT~FCMr-^lG;(Xd135k|mglhN;d5uG|&uv zh4S)0E-j=5s!E~IuGEc*hY?FD7@a7xQ9V|Uf=OKkz}eJbUpogyRC;#uz0E=hq|F|~ zf{%-6{&3fxgNn>!w>3PZ+ayQQN>(a>q203`e5SH&QD|>*;f8NkGJjy?OKv0r*(jDf zc63sMtZmVap5Ws7Y4LDO2V3;}*HMr?V5)MznF)XZQyC>50zwz(S3BG;r z8fm#>g8Xq9dM?9pZAxMq{7%REP8U#yoRBl?2;XRMPODI&^A`;5+EXGw!~LGjJuFbb zh?K^j9$U}PQ%$iP9G%VuUZxhX2CR+$J&QoejOgeOYMil?bTZP1zen1zx~8b4#tAE1 zb!6aQRaOsk=Wh63NJr2AL*-60-!)!O@t1QRiR%R&iocd9d$l=WS#s>kBy3D*a^|s2 zx`s|YQ&abTsbiCpUA3~*5;3-&c_*L_@6Gr0;e|;!A8ZB6yZV7R6{{^w(e**_mx_Cn ze_@hnOq&oZBAByl)B(%o^jhzT&+D6*@Pm-|$*Tni!TYsn=k2d7cJp1MBsqWymDV~bwlsJRGJ6VKCXJ!3cNu9*TZ&wG%|>qi|4lb~lvnhC@$O`FaqA63 ziPx)OF7itSCdPuwq|y8LC^Q2uwWX{3bOcEu4K9|c>$kS^TIw9mAU7!~>9VTbIK&c0 zm~*~Z3J5Wy_*j4=KO=w+CDn;aUj<3WD9WIyZ2 z=PO|D=AS73dv{ZpSubVBbVulM`}2ovK%V}{vK%e6w&dddAIPz@qdd*AD_zFZR<1nM>!Y1@wAy4mhK$7!84Su+Z&vOhm};pA|l7d3Pt6d zRi@*co+Bdl?@KDgOfsnx8@fwXd==eVN@dtdAK^N>=gqE zJeBYFfb#nsiZ7vHU_&P|~51T)9C{dEnhKj#e8OttBVV5QMx-eiW&b87wRLubId z^b2KoBPCiw_xYE%$U6~5c7FK(Nm-~HVWOHK?Y+4#p`vzre}V5HD8802s4vAM4x~J3 zX;5v)7Tn#91%n~v(xi|7to#PbDOuHh%qVziiHeQpQ~Sx?#6^x(f`bydX?k`j@O%6) z0nA2b>er0%NOvKf-j)O3%n#H$eitl>XxL!6QNFU2Okrcv@1sOvKJD`<(#FbCJq_Aq zT7(2D;o$Bg*&$$ZtD)TB_i&Lxd6MGNR8VH)V)blDUTmZ@j40Ri;S-W}ww9!dMD*<3 zN2Gv+0sx034IlG6tV9W>_jwl#Lf6{2dL>>atEL3&hTi{@WvVo;^N1Z3I5DPg$q}zJ 
z_EqQl6R$|QrYmQSsQ?!V7!0;2O_eK}Raq!r%vdZ33zH56i^VC{hhcHRAbC2_YRkKT zv)uo4d4NX{(+o(#H~L-n;~S3hKgWcKbK^`Fq-h(Nh_R41$a-HQ`oDkXN!-`o@m=x=WzQaeFfMS&!^c`R zHl>eGUsn5lO~C{C|BYiIKSoDum$3>T#GMkJ-x;!@S71Oot8Wo5!H|%c^I>+gvXOh2 z$pN$GktbOEut;faB5;a%vF|vC29>oC_;ow>|6f+%SSDvOrM#p ztyz!p;T6ov0+7;F7b41K#FN=B>Lzl?62rNdcW(M&1wjVLdPNBl?#Q$uq$8V-U#HGf z;W-E9rCzFJ^>?V{Ru%e3hX_f6>H3WLGI1>0H*~>4iBkfVa@m)cG;MsS9|y2xuF-;e zI*;T7B#r9N7p#4&6M6n;e)T_Ds-4@qO_;*Y=BiCMqRMOK$c;tUe?r>4eTxPY^p_$x zO-o}L`$Z^(=p!v8JVY#_Sneka!bL|q8Wi*;2yhaj(b5JCRwgb;;SMEji~AJ?48vny zZxuk)pfp2F2c1sE_N%qYE}#3$PF!8VCO{zp0df8}6HV^Ky*PNbK-Vcwl*KyDf0KP#i`W?=@#@7#vp6}FY=_42; zE@vk9%*AnTCU;x3V^M9G-EUID$jett&c(410T{N$v=sh_W;JC2j%?{=< zZd1dR&ZoqbAODUS)9!tDqu=4$(($S>1M|X+DD40Fa@{{K z5Onj0iRXlYVG?h%Dkr+tHp{kD`QCSdTqSeg= zb+qIj&K|iG$o8p%iCW*+x8Lmp%Bz)js!2pecxR=4-WdjWS49{=_YSnPNMP1?qv1aW zn~sEhKcHNf>4W^c&*i03+&?o6UC|y4Ey255?1(<98~K>2bd1tDk?PYDbiF2mH8{HH z+*J;|>>1i*j8(K@oP|_xe-ngAvy&+>G3)!WbF*_qx*Na50einm1`sL0FhTx1hnPR* z2M+#z%4GP8_s!<)#=3XycPlMqUmJnbT;Jzmma5KQsw7sWtsu$?b|Y)kb@g0*1zXbz z?22ZaA5SX{+B^~-TNkJ5@T)i2ucWcYhyWk1zDh5vVL3HW_CwIuGBE;MB+J%J0DbYUd=XO4hlx4y|d zX4p?0Iy2I6;gmVoD)LWYOJVhGTq1YmWGKcd*VU!}{%P#g!oT^l)CCh{&u5(4r=&Wq zQ+p#TP>(K%fX+`&2zSVWjeSwf7~UE_9}&fdmN4EfANdb%Iq>oX|GIrJWzLl~Nft$j z=*`(MQL^VH{hT|v;oM55EH%SfcdN?T6h98K5;C;Qs)zkrSdV)ochhB$dZ8=*PGupm zyc{j5F>oU0J+`L+LTnp75$*2cqUpPI`3i}xdk0&y|82*KqWpLoQCO)r=}s&799dwj z9G_b};Rstcxj}WHr%1P|tfQxktG-pAJA8be0&=3 zwNGOfJ@XH0CR(zmzBQGmZR6qpnHLZVL0pqWct5QkZ3+(ubB|Xk zT|ADw1)CRfLs(9N4YdDi5}!asWXkZFwbk13b2t0s10aZ`*Oi{O6p`+>&tY)1V0#%M zJ2W-zuR`u=V3F=qK6vY)r_Yb?8gCmg-X*iluA^m|)jB#K^5>NOpP|oNgVf72p6@z5 zqL=rfy@%4G_#Ic%M~8HA_envyO*^aA^Q%+DMfW~%w+9#f+}t8IXR|iAWBV6*Z8_W z|NDd}A+yFrgGYaV|2@=hB76R2GW-AYe9VFK(}&kl&%uiR$OsXCUbSTZxBLt0xlwzd zL^+AKi2ncnDz=6@(|@iG1J)--1H`od1O+RRAHjpLZl0f_FoWj&wa#;Dk9$u5z-~ikEpk5IOF%SWwwes^I;~;E-kt3wU&Lskh*tc5>ZL1K!ltjx>3A!& z;`lRX{rSBN;9le-RWoyL$Aa_J|K1DRmx?|_nLb>UpHvjQ$by=)p?v#rXL|Qq(yWvB z+s9r(mLg{XV7v4%eQf)}^yVP&HVhMm60p=*VU`86ONM|cBlo3XQ(ZH1I2RrQ*gVNB 
z#u)t+Ju4^(n0M{AVOewpQ!Tx*2IBvoZz&!?x|1UKDK0#Wdi)1>*HCIT7>gVMU$9_h z<*RCLXz&kK9cFbKcAN{(Wo*eR)sf9Jj0&q;ia=_5L%clK`4f3D+uj*gbXn;0)8ek^D(U?_x8ZFP!r;NfMI4d2kxkzuU1 zqkIb4z4fI;PMU}tJiiw5P$3Y;tg7D8;9jFi z+G}M_XTYyXA6Z|s9E@)0%6sWGKyxmf$a{O}tvg_`of6qfq_u<&r5g+m5*za*2 z;}}hDY_Kb@{5o_HC5MaE_FH(v%%DdM3*V`rlNcDPiTA$v?`!^7wE1?r{O4z7xVe7k z2F{*GA?jTODl!asqA;Zuax*ulAS(1#UmZn=dzjdtehLNtNZJ#5O+5D_{noetJ}1G} zmFcvVFT(aWW`@V66EDTz>C;+d$fPXp?TI2hXZn~uyQ}DHXg7tV+uT=%f2__D+=?q| z*-EZW=aHDB60x=`t*(v$&Wf`VEOl=772I!d%*%?S*+}B%`?f1HMegae!Xr9$67i-1 z&BbUDkM8ci7AP;gc1Ue6%MMVx?&q>sxc{_YC7vHm-zn zIB^XxdQ%m{3P(i8SD!r}-}wjc3T>ZfpStIENbghp=~ItV-hRMd`F4h-CQj~F{OSyM zQ4+`r`}F#}&i$xuYk>m&es(h{=NuZMLI(i6HJJ zRM#oajt5fy;4^(H?Z5$- zeMT1R`CWBYQ1IK6-`W-DsG-o7D~1wLZDT&?p(9#*%2&9#!9!n@Iqf9R^icPOAr}U8 zv*qh?)nQJU7!98`y2RG(hLY8QTLb3d{Q@gUd?Lc;U9d)1MxxZqj8-HC6NwE9d;eR= zYviP_olKAZ+}J%x6a{CvpNU+Rsh5VcNb$05xt_UqL2$lp3%L;%KWEG6z)AsGa zGYPHDodj5SkG4%z;nXk*IO)$uif^TLOIs->IXf@%LTxuAspXXNC=Yn*+8-o_HXM14 zUnnXnH|W#~zZZ}NjIe8qp&}>Ex(O|qA+5HB=P2JwujbthOl}M8=XP1Qx#b(#{R?(m z0~lmDwef{>u03^9q{0aB8x(0XFn+8k#b7V+v&_~~MHD}*=cy#Hhea{kDNC0XyykQH ztup5=DcfMeh*-j>)#rQMS3BH)>0;no%ugP+(rIb8!UWOt@ zl$)HPNPnS?crpMPwZ{)HHsLlf2NHVdq>TS-oM@L{czO;0$Mj$CoBqB(WD@uUAAJ4_aLXsBwWQb(i+t^t* ziMo0<1VW!8ee2w~Jot7MvOQI}J2L~*m2+7zTwlLIh9)g7KCM9#sE4~XX4t#n3@>_3 z-&JVeOCofs;{{uHYS5?TbuW3bV=Dv_*-7j#cf_|+vT4{kNzK??k8yhDb_@^d4wOMEcWun`S z8e1BT#(S5E2i(?cr_|r_*JMk|(V@{%qRx(0L!^J=V}w9nqc&nvHC6@+nkf@FA)wrQ zddYupPz?cqF0!m+1nB#jWx$vNkREm;x2lR}XIi`NaJ^FHCx{4rNa_}+@2E*>cI^^M z9zIl}yZV!EuH_`Qa+?$Lsha}*#sk~LDdaZ!63r+vF;a#vmW%KjPc>yXrIUl7e5Z{? 
zw=@PBFdV5`*$+!bWP%vNtV%0d+}jxl&_5y~$eu2mT3WlVZ+{hm&CmyCDc@CB>z%f) zpmH8aw9jqb^=GPMipC`~HJ6${A8xS7lxv;(GzbX9W;6w1W+u}C=o>R$Z5WvlQT7CW za-#Toxrp$4L7(>hhtu)=vK9r~<=SGadu`Ie4&NhVGo#0jo=NQ>qsA6&==99Y>~BSF z!WZ}60Mti`k#^YGldjg`uxG>hT_m=&uG5jm{X$L*TAu=-kmY%6eOxv^Te|uZy8Lq+ zmBv)C>-n#N-zRAS+v~D%SBV*lD7n8G5#kQd@jioq3dnx5&<9s1gCCad)Hq;auuA&F z8j$U;DG&^*@*jOy^}8G-1_y+E?p&DlgGuWW(s6SK&jD)|=CFs!O0UR@{h_+b^@Ovt z+o=*>EdaJdN|fn+y@i3rQESkxO-s)f$&kC^(;P2N@wu%y*j?agD!VeGT!1K>)%6-s zL(i$&z5{qB@{LXGGiqk%Aquvuxw?Ay`%QXf)e*_;Tl-;RP4{8e>h58*4*&=TTk39Y zfa@G%6d$+ruz=#E<@szTl{D~SaLLVVhmdEVF-aLa|GY%u>N{6IB0|$tzLCmfm7TR9 zQ8rOpTEKE6&!X>_a>Ge=h1}BcP=4m?kMDq)-}Y%@85cbFszn!&5~``G8#rA?0PxnL zmaqFpd2c{pirpGHbPssca9{8mTL>g~3UIgP%p=b(1(ExzsO$?k)^*U~VDgh^W@Tsx z7Z@BXD$16N_T=hzuy-_<)9&@F459?*Ffu-EN0ONFE$Iu)F`PX79N9arUXd?W{<0Hp zn#J>b#;x7S+x4{1s_7?2)4dL1tXnqEtl;~moAqeErFHUFJ#7`2Q+G2lo8;9)0`AT{ z+RWX9gR{N4*igq1(Jg$lA~ZJqgXDUewv7F^PyV(eXJUkD_p5|DxJg&C{!RrswhMu3LAi;5Pr}B!Geb-B&^f$r1mG(>n7U!cCpuD@iPb%%jbb5~p6h19UvZBGRsP7R$09E&DW~xIk_H7~q zbJaW6fyun*Kf5^^+&4hQe{U#NHvj%-zt6(%tRpSQO!fY1=lR{~_CWq_i&vk5>Zt8U zx%V%^ZFRU^9)OK^ugL1%M^8%!DZHDzjPlu5cYd6^!&EM(5W^x9%>>fA;P*6O#6pPX zHlrxG5YK^sq1FoowdW%xEoY2>sn(k|(XPDs+_G?(;Cf_j#|{KpPHvWduy!Wm6;4;4 zIk1hQ`T_*X8Fl~opcP500Qt2HjVExUg>fAr2EL*(h%(XiJXY5CX}5G@OZ>TQnyN;v z6g6026YU~*TE6Oib-ApCn7`t?krevyyRo%(p{T)4Z_jJLSCeeRm>vmQ8IJrG`rIIi zl+oD)!mQ)OyBl{A59*BHZ5h6$9y&++v8zms?!H94QP03|-0pmIh+zinoBw1+89WoO zNVR#tH|hV`K)8(d1W?M(s5<4h<6B$QCq(HiYh2uL=G{c_+8A)XtVT!C-m^@ZO^oJ% z`K;)wM2?0Vm(!EB+y6=HgFbcnJt$9JB25uy3UEbr9*ut*61p)q_5A$$DC!C5y20P4 zXFPSQobziE=FKD2JFXq|m1ZPjGBveq%VN9yb{{q${_NeMZnjuUA%MxPY_ha<^>y?r zO7$6r)pp&job0^zm(LbWc`>*})!Ml?8#m&}JquchX|LF2YSHr0Zca>Y z$5q|bI)I&4IUj{hPQdlM_&;zMh3mVg#nc9;kYk3(H#X2lcv{&9EBiOT@uka?*DQ~g zFv?e~Qq+;NRh8^5;azOERcBwz)z^%@=_py$c53PMd;W_-6+yHiTdPWj-2%)+)=xJ- zqH9&5UttXHyXbQ}Fp^`e`dY_KV{=irl2=DOojq1?tkh2KJzeJWDQ^V z^uFtJW|KbOtd9QlbV#y`aCceNl{sY`OEPN@{MbU}w+_;=-htQUGF{;1!qz2lPBd{u zeUC$M2f2YgY+6yxH$UhH7HMsjK6rH}4%Yoy$ghju2r6&`M||;}yO`Vsg-~Z>`mhLc 
z(}T=UZKbuACgy9v!}>6xNVSo*{(B%OI#jGW=MbxT`T3-05OrYUOqa#2?2lqlUcTkJ z;z{bUzD>*^MO2gkaW*U1qx0F}^FNmhH@yRG5#Fh&92F+5oJn0q`GVRjOOKXlS@{{5 z#3J#Y8W-Q7+4i@h_lM@z=4I8ZvSx#(+hFLIB@*6yRqIK7^cxn^<5|py9`z;)>}X{f zX$Q#k+>i1=C|{hlOg*Kml zzBcmqG3=$_M=$^YoSA!i_PtYceEXRDeB$vgv0mrP7~4MTNARnt73Q7Qu4e!RY)mv? z7bT2%NWO5ltdr>oe)nPF`SJPoklexAOTM)u2iJyao-#I9wj$)sj{STJ?YVd$D@PZ1 zM-5J_1TXRaF`$WBXG~HC1u->nFozjA+B#UQoC*A16iw68dc2!lTl*0ea2U)nO+6~q z;=QipwxgXo{yeDf*Szyf1;kq1uUy7STpkfxE#&>sAA|s#qRofq?f@V2ooZNdiiIDv zd|n$=DE|J(6@Kmio(^wj(Y%nJ^lne&xdAOGq6(qj;dtQ)yLqEZ4F==5D#k2GvdpM@ z!MkGf=SOJ#Fn+5xF`*jn^S<8$%dTPk+Y4j$`+N0qH%@l;*_o&qxi?|$LS3(*Klabb z?d&Gm`wl!T?BE>M2BHMBIM0`T3LXK^`cHe0#qO5?2FUN`km4+)S3$q!Vk8|`ZU$Bn zj-_(2JCGePIUd@2v>j^o63y{8u%kOX2`Ybc|sT{X!dFohw9!f%>s{+xKy6ZXu9vX8hW@!|k@+ zl<3wleB$d*o9TAw%lT9~$((y{JAHl{nLn3L0kO8f*;qwD?G8uN3_cyL0g?SUyuAv@ zfjKF6<@V<8p00qb=Yek#QrC+f9bgc6JF&eAkd8*5^ zi5KVUro9C~n*W)Lm&Xr}YGzRv~zX0Q!M}d1an(z&!v}AKy2H#=#H3(P_8Xa7$sDv1punyEWS~yIcN6kyYiTTR z5ESHTGOv(EFP)<6@~l$dY$ieKqtIzH*)c=~{Y?@iPbW&xX_DpP!+BQzQi>l@wjbdC zeEi$*YYVflo54+!{<5lSGA?pm@+axd+fg&m4*9vUr*+^Ug_ua%*EF|Y0X1l?;@_gg z6=3n1NRrtYR%83Z&cMLhKpIM|I)$k_#UN$wTIhvBEy08*jrtBUUrwm#H7ql#xAh z7Tp?_qNSyhqK50LRp!H$@7n&TVZ+E@-_ChsOiaG{(C+eWVJk!9;`iE#oluLKg|~x^ zkAq7`R{e+|9yEC9ce~u|_apeAICPUoFq6Ue1-Hrj2-cvAMjg zbm3@PW?tIp#%V2Ws??0TS>JulaQ1qE{6IeJE#8S$==%>G*>J+RZfz0-_Qa{K_IrrE zd7E(SmbiU!{TNi*4UJ;eiyb8X3b7{V6JR?}@_gRqsSa^RkHssS<_tORN(BvY6JfeG zvE#_vUc6KgNEc-$EOPe!yt|Qk{j|4-Z*_m#;*$rZZ05zmL2)p3@G!Jh8Z0~;A>&+q zuvaH@4_qSN&#4yuN(C%v9$k0S4!jMGe7^H@LFidhlen z5=vx!A=2-zpcZNRtEJd5afTz}183oatuf;AT@DwHF7IPY812Pn#}fdz5Ch*H^`rc= zD|_H^ssS-C**rW&4z^JTbP4fx*3`AKtweU*)tr&jfdc*EgeM-*;D~aIjB-oK*Ss;N z70xf%X|df`VCx3FwIbGY+~CRsMS~J|B+&N(EAfEL9DP0c;=CHzrzz-IbrqbY`a2gp zP}$1LkEQz|;_ZJl67yli`?PcZa^M{TG;7O6hn#b8LDSN8=Bj2h+m zL2$Eku|&3LSZyMiO);p^)d>__>Eh6}^yqHw=)v^aOVjSPYQBgR?OH7I!|fRRf;6IQ zw^(rV)uN@yCO}-``lZ=@4nd6Bi7F5j>~b$|>vIgx3)Av1J7fS|Yx}>*kK54Nx8w#( znr^ehBnm*ZTgW}MbVg1XE0uCd%y^-+gXxkZMA^dH 
zHGU7321X8H=zQ%zuz1rE@XD~;@$f@1q$#^?-qkI(zhE*?NS$6FUAxZF-6;)18-cMO zKC@}6qC57grLIhu0KNOsE&tYaVy3nH)6kAou_KV1CZMU+HMGD!XEQ>gMC@str=~RE z^3yy8x;vp+Dj2T^>V~n|yuC*RcVGj$I9YiFpoQ`hL@F4whS+an0q zQzMk=pWRE8*(M0V+RLt$^bEjgiXp82Snln}e}(S0Bg`4KXTG`D!Y0z?=k021?^s?~ zE-xkc`4bCsVJ8!#Aox_FK!tAQ(P<&lII1=G={RMuvBQj$%FqKFK84{9+^4G1l z>X8dEh#?bOj*edxirHGLrYd^{!3D~-Y2&HlA}8G0238#?tfwA21(e^YRY-O*=U2o$ z74gUmK`NwI_fJPOc5^>^LzGX)C(@La;~9TNNLJ7pm(A@ap}fLpmE z0k{rxD!auQ<)M2ThAw1MZ)PuYQa*MqaR4f0-&?W!4^J2AJl=@-*?C#P*bkiAjFoB* zdu+q{x&dra1jR-STl$>I+eDe2GrkSe-2fAh!<@PPOe-t31gZKRL_ha_4NFb&mS|^Y z720kx)Q87tF=^J3%;8I!MnYJ0DgCkIqBbU;e?qZRAX&l>hpd3p-*0M( zn&##>2P;x>NFFz)(5{yG@lisRJDJ4Yqc2L3t2u zQ%Mk#WQ#D7mWmyvX=^_8ZADOr6UKfzvo7i6-2_EIocYK|gNJ2Ph){9*Ba+z?8E7+N zuq{%Roo;qK7wM?42aBZ__#KCwo37g~=Y^x_m=%bj$VfxUJ`9XPPF%;3gJuVGO3}>S zWxyL7=v^20{{X(~+f)YL1r5XPa5F-n;1H%oQ+gZ2fFCgxSuvPFtni)Vh2QP?&2(?C z=NnyG^Ng7HzkRzjGZnB)7^RfUN((={9?8$v9GT@(VZv&VY{$sYw&}(mMQszUto-zH z&weO@mR`&AZ6=C#(HgCPMDnK=UQ*ugyCt8o12zEVP>cxTGtcL{u{qRSS$URfU~7a| z>D{DS;AC@6(KPx3l=`XY#cyHsE^P?!2YC<@F00xuMM9?G~4z|~>}Qk|oz z)p@m+4@fo~Cx!E~WbN)nAtNFss^qS=m{mtBQ`Ud)w1q6s{Bpk1m>W$0*2+>cS?^ED zm?BM~cfXx2v-79+?^5c$DvYIU(ecfDb`D?zuB_Fxw(JSU=*!T+xdNr;9vWmQ(%d-+Onczmm>Fvnr&6e9e1F;rvr=ukKqh% zJ=>kCkn6txN3&!qevAMQ#!>ot&n*fJLgcTBD`_wYW6d5D7}XU4zcSwSR-lQU=*T}+ zmRi^f^AKJFy~!Bg%9-W-o=9*kr(vyGI%b8^bM#APo2=k(+n$o7 z>m#eKWZaV9&RcM8LC)X9vPf}4wZyN9Y>JSp)#sEsR;oA%gR#G|<*@>hwwKiQqkh?) 
zb``nYy(EpiTZ2mz2Vl6IG)CwStenb&z;ZvC=vp4e@3Y=Gdl^vub#ZIr%ocJzkTq*7 z-v>tBKzS^$KnB2f4q~g5rY(V{3v(`E^$d*aECtC_l_?{I!CIV} z=1Zn8uMd_H3RJ)5{_f=JXumu5Ju_8zTB+Fa6l(QFdx2QW9i~->PN}8*{dr@~PnE-_ z>qj7n82Okm%#`czx(pvfdrf6a&u5g;{4?dIHyyhvezj$^p_O3x2qq{hbKgpPf1`pV z6KSfx5n13M0#k16U&s<|AO5w=cWP=Rzz5#0t8#LE5Woz5ud$ zSnf|_bQC8GTZ8JQ%hBuKrPyKD+P_9uiWFfDNVw}q8T8N z+EKlu9X>;^|GhX^EwGa*U2izn+Kgx*gOjs5=8E0k zXZ-_N#nQ~{cdI%;wvm*=hIj`L$q$Cd5X9`D(v;MvWA4Na3lfIcZKYe=aURkw=<-d~xIU;wP$fX)J{?kj2tiPmCaKQWAi4*BYHS7IXQ=yICC* zY?Lo72Xu5g&;P=-{_@0mgWSaFHuAc5_Xvw}sZJ&~!PfOv%UPudsF;$pZSS3}8F zK&aisaP6>fvZtu=u=^%pa=2yd@@im__}(^LqAg>ujhLj@ca^5;2o}{i@w?(}jk1Q9 z-62T$bT*mxI%ctK>q=stVjg!%5=m5U%I|unQa1nO`nb!b{HuwB$>BiI;Bvku=-(K^ zoNymRBL7rR1?MLA>UUh7?f0yiER^Jj7`aq+R6JBrZTc-4YJbX=+7DwX5fdZSo32AI zZnBbzp`cHYh={uyzrWD1EaueJjaK)%lBTa)s_IJ<7t2HQg$hK2vegeScKzd#!kGh?*t?&kY2T0O51$x_9POOsHOPfRp3>04Vl zDa9%7?6q~jG{`XS_>mNf0eFwS8b=o+=VMdARV;7$VRwsRVNaoi?zwxV@qp%%I^UDNKf%*S5CEdo{ z_teD&`iM52^&@%t0`DS)cQ;5=SBANmUv|X>a;24P6?@y`Q;92@M;o&T zZM5|K3~;mG>odlTu_*V=hNdoUUAr^q^7ytFoj#n#tf!}!?V`x1 zFgATo7{dpJFl&C~L#ls~Y@)O|W*SRBqn&dT6bT|B+KK2duUVy&0okC~i-N%pp9+4Q zmlqt}=FnZ@cN7)XeiiKOOmF4lH$Chk-K2VXz-PIS5C}S$=O{gJn>eh}YWZ^(GMLE5 zba^_ZTlpyV*5X z-|bqWBQ0OpSy4ejIpSaosqz4GdZ!EBvV6Lri4p3eVBeNhFrOW!Q!9FZLGkN?Zj6Ld zl?dxHi|>kgsKvE$`VjZPS{w5sBH%yshgbgf z?9`}hhgBljo4-wjxx76%9}Eo8;f$nuVF&xsz#s5_K0jteB`B}JvUn(|sI70qWu`Wg z#`8(&s`G*;W@CgcmbbMPm>m;aEf z&Uk5sab`uZy^H><-uc;RNvHU?kB@yF0@tR5sMrlEBjo&=%L0**U@I~3*ANZN_x|`4 z(3lUbksQI6qWPe%&{B3(KE+C{)bsq#9KhQy+W2lg5d{^&^Wy^-**`}qKdk%haO65K#=Ysh8jXTBn2d-J0)jm>FyF~hYks;p^*j=6bV73V@T;1Y53-Q z-sgS)fqU-n?0e4MYu&xq>IYt9MT9LMy#CI!a&s@!Ysrz-z3((@RxKs^!qsgS-5kAl zynE*qeBl!zGNdyjF6*i!gF2D0Qtt$3F!O^tU8WA$MozB*SRV2hEahqZy;FpinE=XsR$eF)e4wBzV-*N}58fTg^4u#y^ zTxYq{YuU;(TJ(Axj^8Jg{Z3q90m^|YOCbRk`k{#Nho6tXf1`<1+P{&yQ@qMk;>6l> z3wkL6c0sODq%DUx3)w834@g1$*RpFT6(9)+wb7(#6*D5 z#c;M5#s0n=2-aOw`=o8$kBL^7FuJR578lbT2H}|mhXa}A=&NTB_IKn+bFvV#7u$9T zg1uTZ-UEC)av&xJYTF;BEX-D-?C{K8-PzIT9TRze?^e-EpJwR*VJynQ>l!%GlfU%N 
zPGj2_h%(C6w=-MQ1uP9MI{`t>$o-|s$w9|aQB5##&`4fEp+%&_-gxGnhPmpzL0)NO zN*zk-_tM<-%vt2Q!{aq7Ebu zW%tq5wD^(Xit}eUa38+#JzVXC__Vq{e@e&mdsOJd0i6|~__K5cbxAtwi{h}e`Sfce zSK`|sO359n!p;88)RtsBQLoA}>*k8;>TZ#;D(DE$Qg?@~qs?N{#Eo{G5W&^HQB1hv zY1t{di{qSXK9J6LK!M~#|F*k~l=cS6Qn1uhQ&Uu-SGLsBo>C_xQ$4J+XtyQY%Gc|4 z5DW`{uGO_~$Is2TkF!F^FRPCqvmg!`hJOz7;x0ZMZ z_#!K|V_u;P`Md32-@d7Ib~c3JH#o5Bup;q8qBBS~`AQ;QK{`LjVJjBN#j~>7bS8cu zdj?2qyq??hAK8Ps!Ro4$J@uJei3OhRA5tmFL#vSX!1P^S=H}W@lbng|cF$DFi<*tZ z<7cq~@4R<+tWZYiDoXMaiXh6eFUj>}a{r86Z1l~>cenj6xoD~q*eM2WUxYSMZV0v7 z8_h_>J*xO7xcgpp*w(6{H4<-ZynVWmw-@nKW68uWxx?2x+;WmUL{JU_Ol8%dpX5jZ z%GAtYrrg{w@B1UH!)sl!!N5~LXX#9tC6we+@Xp$A-ZBugB0=;@LEXP0+&6}XR#BcN z@{k?am}Wj??A$A{ZyPJAqk%F>5S%d5(BjAKRaxub)$8D6pLyD9p~pifD@3Jd=Rjj% zY#b5R5O+=dv}VB#fzkW3!YQW-FM&mIVv;I`tHEE)bJ6|Z3+*48MoB}X6tRI43lcfc zvRQakPj!Yqz?10FlXjI_ZyL`>1lw4jUM6p}Z_VAjsjBY2KA3E{lxbJs9qFDdY9$5} z&c^7O^JixBXwvdatp&Ac?nnM~YZjTgeB0bD?s3ztC3}7Y2s~N+&-p8$RZ77t^{(l? zygGTWRI4atY)U~#PfxRZOiOdXNV7vrzr#qsgCvYjh&Q9Im_lS;5XY@KT3!yb#$w4J zgs9$cmD-kWUn-YmMxxFh~|18td`lFfe| z+-N(TU8qPl>J$(Jr9I(s>k0~RtnY2-TcUd+7UME9;GGI)Q45%n^-xcWd-f(!W#7@w zP3 z=5j9UyM?d2r32kD@OAF_T)5Hk`d$DF0`B4%bi3Z3TKsU-IMp|O#VD@T^ZUOSiD;AA*~taAL>58K z-1iNqsoC$6$rPxvO8M+9XlZiF{ck5Jmh0a=xy&zo9!mRoNSI5akF(wC@k)D;oohYcj&DmDALwo}I=jx@t z@1iK1|4Fv}k+I8j|ES&yi`>}7lL{>@i!7k_^Sts(X)ZlIP}|dsm%8JE-y0CHA-Gf! z9Raq4X^w?>$lo39UMC8*KKalvZE|DtPU(xe^5!ms^KcCRZT;Q3iN3Xmf*Jz>qD1YI zt4()-N}4~6@Rh_mSdq;<1i)O95p?&h^I~nHulQYI?|DE%8zBMSeIwf|m8^%u8%W#B za=3 z*lg}~k)i;fK6zpSu1{VM<5w<4b6=_P*n zpTnHm&Z(x2VCno?P1(>(Pm5WsG}pO*=*0Jvp5CRVdf7v)|HD9&W$LnwggrlN!W{By zv)0$+bxSIkf$;!Yi*MF>3#LLXWqbWe|5~I)*7gO?E)Tq-)cIOhZbO8Xptax}*?dm` zBpc`wySF>hmMpVkXFzGN@ieQv(a0)G-*~YR*)1hJ%d(V5IE{T~vW6BSqKMaYAfrS! zzqij}wCtTGt)e;Hhr2kBI{PbSIfEWIM}yRDgKf>@4B8FlsH>eH2KL1s_5!!Hh)Eb| zcUVO;+3xzM`H@Qn*wm~^A|j8ENQ=i^m+V*Xi2wPtIK`0Z-bFPmzJ{vc-bBckDUQJ! 
z-d48wf3D_148IzO=qd?<*Pab>jwo?i1JI+V>21;F-*x-82i#R$p=&PNg5ErkSApr=eNks;}%cmvezc( zEF7hGcI$9opXw~HZKa33 z4_A1*c?o0$c#)Vxmk!&q-ToC_|B4p}(J(2Rq|U6@@6TNHMr;bn-)dzHh1K*;cTIQC zxA4m>9<}hs)%FQpUw!+)Jzo(xY0^$K z*PumDG<*C{TDzH0b$LE=JY{5`mPQ|fyQSbb|1W)uGGFY@id2=Fwj7~a!6=2e4LS?& z&#jSS)FLYBs^f&xjFeA^H=}hyd#pzJks&AZ+38R(_*YMSpnPqS><3e}sE#cBGCS7H zc4?8@YMxm=k(84oFNlI8d#h+S9S#wx0viT%SYwW;2;=qU(6ZetNLz$+qVJNB zx41J6?Vo=2X3Lps){sF`{!Pk3joOt{u>=N=%22>boos`z&=>6s0ID|tk~v%)90<&m zHJc#TAs4C=ZKLayl&&!SQ};;`xEmvQ?~Shzhr??t-!`_RyxukoNBIdalh%znMekOq z4{Ov(`A}ez65P;4d5!3NV?r9m$9k>kSzeBpc>(G#k!I=+BtQJYXeYsOw9J<)Ta{W8 zMi>q=LgSlgx{Oi`Nc)Qcc|00V1ay(2Rckcue}B`;_RKLLLT%~nN4-aEWrC0Wn=)sB z&UYb1r(a(3e&H7}8_%WPL$wR^18=7@`Gv%N(20GPCwC(DTnj4YB?%&8N~&NTaXHlt zD=GoYEBQY&HI1vy?0s@+K^}KHxh9je>V`zU`uMB!wOu}Y{8_XDcY2d6tpeCO$&a~g zUiI&+AyR8hc5B(Obua=w5afA2sjmaVPNLKqbs0i;5?JI0`31RZrv{O{c}o82?@HO` zLZ1t5e0!#d=B$4Ad;N1Y{d~)JwVYG%LV-9BmQ*tnR9FO)gs5PGgAk*6ue7)%O53!l z0g)5ZQPT;TVzSHfxz5bS?UXnkA2;tkCsm{DV9bmtu~)0YzhSN3Pn;Ba`3am*Y{i5a zUJh+s4FKite?fFf+&oX`4N-@Rh?rbV;YdY+|`%)V_1?pdR#$qnEgQn{fN*oI% zvysu=~ zRk5gV1b0s+Cx04bH~EC!F_2;}Z4zhadF?7cvKzo%S~rxA4z`O6UOg=EX1CF45y&F{ zwZlnsOl9R3eb0z{=En0P;pP8y;Z@ZmRXO|Tg{JQY(FoWD!wYZr7VbZh$UOcM812p>Aq(gLnJ3j^ z_3&y6m(Jp z9b`P=5ek!=CWtA=vZyFKV@<-C!laINc|jDW!9nPXY4^!_kp#%aJkDC8O-n!kbU4c~ zpSX58lKO3uHINjx#^6gg)h1#6gPgZQgG)do%tKRCE{knUpg9PGlon_+{iby(!V|Xy zl)=4qv$+kv#PpXr&)v4XS-WoXEr0X4JD}B@MtKNC31ip;y@a`W>p7sXsPj5{phhE7ZcXGgSZpxHE zz^h;(tZ^10LJ7G_UCVU{v2d*%OjGWZ{g<%`Sh!jEzRed^Lmcz4E7qHD5ogtq0L`k1 z!Cb`ju0H(0c4Sl~#Z7CJB@0SZWX^wij%|oR-Sq=7>owcHyfSl#dfdZHV|&;@%H~*_ zeQE5{x@kLq*P&w)_U_fQm?j*qo^?F$(&rVQsk477;b7RxsNFC=uBqfFCqJHbqA$o` zqlPi^jklcju!D1)`vh=8jP>SH3$Gfb!|WvX6n4af-qRUPX!Z|45`UrO#X#cr!`133 zzwEW^Ctj}Wfv&$RywQl;py7h{<>^SUl2bwNBi2iR^=gaqZr0z?32@~_Be zHKt@h=F0C+>_>P%;QH#w^QzZKcx(e|zbA!zzk+m+hp0FVkt$Ch1%`yAoS(5oiAS5i zuu_X?95ppQVt95-*+%YjJvo1Y$MQ{Fd^zACxY|{!C5o^15ei_XJLz5)T5Z|^c432^A0&ICR^+#HlmRa>oz00npCxkBlhs2QbtBitS`N=NU 
zNF>Di0MS)ZJ6Ml{P)<Q%_17jdAS%}7O?|a6FElV0(D(3o^B-6oQx{o^X85Z|K6bFF z=WYAkABUbI!vsfKi}ggV&#TE%(o~BEnfKyI<(UR$8lG(_L+~Bavg;c)6h>=so?%d7 z_R9S@Whsyo8&qhS^^<{N*A@2C;rFdp!P&!9CJ)=Okz$#%@2#hP7tmcMW$AjWxB~S2 z@kvQH9_vw$q>BHR^H5^*Ub0W!g7)9e$4I>oycN6kRBEdeFvF!j)*mgDP=&NPRC;*W_J24w{EJATJ0Y)a z-fL@H;=Z-Ef0N)U<&yF1ew=X5QyR_9{)Z%b5+N&MFptCYXE5a651xCcBzxvi(IpGE zmAD-4-HAbjb`16qMhV%3QsjjmHl43Ok0sR|**kK5yq3(imp{49$jf@X$aKlwC6bV% zFGAkrEJ~aB-%BP5QVB%;MErWxM+ia^B~2`&sOi%m+$=hu{|;77Rf;+}{01#tE{ICI za-|8mfYjx2D8zBjgh&hBwZe>1Rgl^LOKhP*(zx}pZp;Op!1=3H75DaIX_XHQ8L#9q@(H4=|VIZ|xP62rM_o+{*cA@`z_$2J3m zTFAAQ0)$N`h`j-Z=Y`n7^XMMh{!%xs+ zKp2BrHK-MJ=0c-Kr35T?GT!88xzR&cP(_-R_S9W07lf@L%kqL$O8p#Pn-88!XZ@D| zM`QBfRsS$)zbkl{Q2G8KsmoQeQ7^pGS$iI% k|Lxt{Vpu2%0Py(8)F+cprQ6*8|Gfy*lwK*;DOg4PKUyjV1^@s6 literal 0 HcmV?d00001 diff --git a/docs/img/addon-monitoring-signin.png b/docs/img/addon-monitoring-signin.png new file mode 100644 index 0000000000000000000000000000000000000000..1cc5c4cec503ab98f66b20977a3c5a03419a5b08 GIT binary patch literal 96894 zcmYIQbwHEt*G3VTN{J{FkPr|Mq!Eyqbc2ADNRE*1F6jmxJxW@-yIXR=sL@DwcYP1O z@B91q2W;$q?!NDH&UIafppSCmIFE=Qp`oGSNJ@w(qM=~`Z|_@y?gPKn(KU9_(7vHb ziU@vkO5B>ka(gx&&vM0Mbn&+KG&kKFUzEaS7c;>Gd!iFVxEM++SK957Fk^Poo6x_L zP=L}eqcP{wo_w9qE@{@)NeB(ShZNnt|LOb3-Qm&E0wu(`ha;wDFPkU_W&7c+L4zKL zqv6i}?2Q2ayMFzBy~DPq`}gg)fP02( z*8&gz{fIU{>qDmV-v>MlnN%$Bzn_|g(Wut$VgCDR4}UG<58}Uq&F)=-PZ+HN=xD5- z-MjnaH^IjpGrK-$j#!ROcxZeR_(I-vRG30Ke;;6(P*Y%LlW7~Y5A?#q+BwPnknXh9 zh7WQfvxrhmUNu0aZW5y(xBsoESw~nsW)Va*%mMm_6)?4aPYfISQH(VwBux9k$mjbd zd&1J=_R7i&B!5ezVY5XkhO?{3G|XY$-+^m@(apNJz^w0eOGWq)y$z#jy6skUWO&d( zFLIS%Q`Z9cfBS>B!}fIyaqvL23Eot|$G>TwDxEALA zll_p=r-g;!e@ou%V>yW)`Xn9yLNGQwNv6j5ruf~)!r(`&%gy-0xqb)B$$rdZo(VN4t+Jd#r*$0J}*!`1lDP1@g0`IdtM*P@Zw2){0~Hp zvt6-PHoo`+%9f5L!#_Xh^YuQ`dEcG=yO$1{o{|z08tgsD*{agR!WYu> zz7IFL2RCX{pvZ}mW*DAr&02YKTlNc zEYPjpuXm^%a=tOJS_q%s*j{^N3%ipTN?G$Ka`h#n&)*ha; 
zom_7;X^LeebZr#(vWlt?kSx7y*alJwGCnL&Hg(_lU31mO!$Ljyr{@-S>K~Z^4gkThYcm7nI{;E_NrUX<{$6+s@dWJck`&Y0V!{~0S9+QrlHSe7U@?11@rvD(Pt~radjBzvB!Ou6j!C7rt_(e=EITny z9v6uRHYS^d*FTq?4>;okp1_wm+HFzn$gOZJp4YngdQL%r?yMP z!ZxD_CX+0~@LR#I4UJzph#%Qgg8z5==gAf=jJHK#>|tG1`{eb7o8gxg>DofS3iBLl zbbP-X!&SKq;3aXdy4m#Nbc!Ee;QeI_mhIG zn@&8V<6iaRD!{&K2V38>3b9yk=`rW?tC^MFSw~M2uplLSb61bfpfCtEV^+1Vv1iqK z`p4yGR4HTEp?7Ys%3es@!eI7T)75Cv6(?mohpcbm>p?1C4o%1Rn%^H>Ta_|LxSke> z{65}<&7pdNV%-i=%nErZ0ytjj-3b95!36rsn#tix{m1_o2l zH%8oRr17RWGV`=0RaH`|SXr;|JYT&H(EWx%p~H0;<$NF}-9R`m&+7 z^l+Uv<@6lJz`#7ETilR?5y1#&b(a%0h%el9WPQ*9&dX&p8HrPt&o?$T-QKJ6WIL&h zXyK4`R2;5d)mBul$f?=4NK(EV>^CnTgPM=OO!w24As08yU*yJvBE>m17r6&^R48K* zjRzf`SC_k(_mf1$ajO#xr+OpGkk2?l%9xLLw3f(}OsZNTwXGmZts=|bN=H7ex$(w} zs;+SIS;u){c^K_J%Sv*Fp3l8*+@*s9e@LBS17mNJ*T4(p>e=c)eMwnX-K}I@q{i=x=ZeWhHl>HtJJhB=?Vm$kL@Q4SkQ|~4Jp*JLHZ>1x z;6lRmsrw9v@UG@b{swh*9(W;YDm$tq`%FqwvbU#4m(>)qEV)(Q^~B?7j9{T$`7g4HGD?h@ zx_dj%8=NbVmO=8Ka)n6X)QH*q%*=Wp1?|G?g<+?+ecMjfmw45F)bq6i93a#(l-|kL zXm*UU&F<}ycG2=1Wb<&Ec=80XAJM1n%j$96KXT`?$i=POEFY<&NfjfgQz~Rf>F=h7^W?km>CVUV);bkf33R0Z1LmW8JK3gHgtEG1g zN1WW@i|1#JpOj0|wDUJ>qvM(n6LkW*PFN={|8OTOn%pWOIb@YtJ4w6B7)_Zuur-yH z{dJv^SGfmR>@E>49Q|i?f#kYwjwc_`q;d*3PcADee@+<%tuDGQs13|AX8JBZU{Gx> z506yq`Z=WmRhO$e89prNh>#uMdOJlMX23IhQPcIoy>WFer$Nd0I|T@=dkLzO_qeK< zp>(_gZPVs6_8!!K2+T1txmrDQ?hLZ#u1(c=yp2A%P(yz=-#gExf+zy*?K$Tz2Ii9a z?9GmIIkm5QxTqz=MW~`+h^C9>*mw1x)s^=PEIkje{McdCf@s)gRqt>VQ#z(D7+vib z9R@z!6ffMK;obJ=ome)Wt9p2fzv|@}!oV_L40u2(0@Dv@dlKZ!@TLbuX>M*#zH?2% z?+!9lkLi$($tQ(ayy`0H=vZ}duhvu`%tlt&bwBlkc2fG1kZe}&PADoX*K7oFLLbxm zzDn&2&DXv?G0M%&*E*MRa9|Qz+p3A=b+pLFrg{BBlf_}dyF)G@uds1t#+(FQ(@W$t zTL{Ye@Hqm`Lqs_;GV;}Nf!O~?baa^;Dt9_j7nFL?OS9`)%t?TI>DJ8<1Bp+8!0^jT zORpMExe~G@>AQb}4>BR=QYI#!1Dc>+Si35pqF(v(guCi)ORulT2d9y~nRNU!@&w5- zV{uwalP88v-a+w6LA}y;yz)C|`o)oP*>fhfSJE~d&u8j9SN9hKY;6wS<=@I^y7$Ko zEuL)8*>5=wJ&l0yIBbb%j^FqC#{bh#H!erX@U4^n4`B!2))x0m$CM_kcl%s~5miAv zwK`i@HIHB78kw6Pcgk9ZlCrv-57WLgKZmM22T=s4cj)Bf z7fWBaQkQq?d@DxC;wA@Yeckp;Pa~{M{noN1)@(Rwjuby|?X!@-$ 
zen1;)disoc?q+4qV=+aw$i-i<&-3~WGTyYeF5h%yA}V@b)p+eGEyY}`X#qyZCYTs0 zKT_K&jJOwG)l4H<>H1wQ2Cz>zN7A10oYa?_b7vv`{_iDWPs4Qba9JLSx4P$SqGQ6R z!04f@;T^A-vR#MUbEy1@K1vaa(l>qitf&~u>fv@C`Lt7hcE4q81D<mAn$i>NJrgj@2wNwsiJpK%YUf0(=R`1iQwX^6)Zu&3d%$1*qU0h81J&x7$&CO+? zm5<*!e4Qd-d-W&t@M)*4jHDco^TxzP>qBSfw%oj;EmU3GvJE1cv#B(Vad7nf%9eR$ z1=roPYb-Z6n5T?fQd$<`y7OrvZr?P;bAPMm>D1gD91eR2o!pY9A;Q-qi$4V;9tQ-U zXxH0WbRO~8t};zmd7h$dX#)z2F{wuN#|iM#&>SDdFP0hht12%8H^O44TpX{F;J#m(YA%C_0*5Se z*+xF;J;PhhhPA!M9){R#xuhVl9WdX^y(+RP0++z)L=dGUaqCK{s0?%~i3i}2usR>u zH8r`PCcpHilSyJ@=Q3K`X&@gN8+nY8+KdIEeD8_DfTjqOZ&20-V+=qM&146_lKWR!LP`}{OQlXT2Ql8cQj=? z?f)J^ANp!H)>nfNILxu)wp-Z>LdQCxcII2`l+Dk{$uDed-FLG$A013Rhf$~SI$rd} zN49+Y-eIPpVd`R1%h(HE92;AynmZS!800Y?9@*NO?$?-o@4OKw?C_e#xllz~Y^^;g zT{Pg-m2cJ>N}H8!M7{A!0>)u@G~wS_yS-}}-jU{?v%0)I72hB#dZ{dn6CfOC!25pE zU*Axnr+P**H%kmU6cRho6$Pe=T6M;?eHOi3cUmtm1B+z0`#Qj~8HD>NptVlGttls` zWp15Zz)fGUMNMjb@9N2JQ`7NUiLf_?1gzeyyJuviys~m-eD2KNC|i3p3h4I_Dsdxy z{dZhkl%aF3p0?Ke@k>il`-`umwV6<5r|70?A@v7Aocj%r)98X1tcoit&iku8xS$ti zc`GWXXIlQHjIJAf`_Qfs?6y6Z`ZA*dr2Vy&a>v>bG;%sQ^oTN;fZ=TeU688LLZu?o z4>0Y!GgTq0=fEfGr^O{1(cebCY_FUhD4`DzN z&r;~$Os-mSi${G&!Xq-7DqY^1*n$!&u4a+>Kr(jCJ2b+vo1i0`q}b zljKx2h5MVXFCm_%ybxjmHxW9*i&?xz+rl$D8=K(5&C^S?mgd%(bmQ;%dW19^HZ-Vo zR-SNEvwS0UGXnun!s~Quvguc{p=}+rI zU~Wzb5&zD<6Fp@bwLJzpc=6QH%-Lqtmf75Cba%JfYJb|xhrD)%%oqID0}YmyyI=*@ zPBMOpQt#Qxpw=9(tG#dGsQCG_zb<(DrMX2_g6>CAgY(i%h8ZSD<^E`TB||3d%AQ;* z1DvRBIln;PHgAgBM2i5VvPaD)LeUrldSQA>J_HvhCva!kp5!}6(#l$z(bQ|vW!c&2JPMwKGKbmmX+nTzgY7>PU$9rL-;)Pp-|y~rtQUb@IE@e=#N!r&2gS^ zb{TfU9XPwIhn$?2Ta)+Ti?yvA(@YRF8)?w*DueDNCuvj6ND?OYK=6p#`9ZQHHz|U@ zEZIQ)vYVlGZ<_FKnH}Smn?<8VoI8SPBO@c9iT*I?TI^L-{uCt&9ny*{yKfcIiPp7V zOBUq&rO*jmhbwFQC)dzqC1!ep*SA};{(FxxDe`{7c8*^|PS%G192zl2Yhq)f19xSE z)|~HtBw!W5{xw$nU-)L9K9EIzk_{81Dyl!CO`~fBe9X%7cLnYi!YV3qdT8fe8xs5V z7Hgn|t*tLe>fgDY{w$@^V#oEJ_&#w78!nv2JA7YuUtuqe#=I_wto3ml{$n+&RNVwjxuCd<`*-J-;!s?*hG zTH9T8`T9of!dCq@HWw1bU}SlXq}va~lvXj=#9R%F_m1f)oa3 
z=JWI0tJ6b-`$jzHCU?vwa7@!pay563Jjm(6bXG-PRbW<9v8|avv2EO1AWo;GmLxzN z=8r-;trIDNVbnRf!|S^egrOLCIQsCF;`%4j&mlizV6VGNyA~_*ys_?keWc@{X0J<4 z-BIkuaD*9&+|bD?y9;PbODmab*xJ}geKx5@@i&!gZt{?Q9sNYAj&|&6Ma#XIuCZ+((>{pEOL`% zBR7wMQ!|A{x0E7~k9i>F^eZ>vBfnZVt(4q1751axbYR*=2+id~+e^Q&5So3>NA$md z!xB5Kzo;0#gI*hifmt^mr(Bj!Ezee2jsr%WTH(OSJ?Q8W@5M{jv~R0U5nW2$hHi9( z8*5+j0@{Fm9)L&w3^#z7c+=&2-Cq8K{jpISi@y56Z}9Ng&}Su$Bp6JECHJE;VT{2x z*X2Hu)^)bz;(|2?@%moX@(ON<2o(`?k}9FW(OQWr#N*5r>uzmlbWZc*vd+&D?&q0A z5Df)|lO)f}0KC(!v(wwNRcIG(!RLyVJkN6+h3}ta{Z_Cf&eZNeLy>HSq&>z+>MvBZ zFYKj{mS?NkyBg_)Q<;)id9-v`pFhwVx2MVam20x*qGvi!sf(4Osp)0DYySDXT}k`K zv+;S5zX3{6=3w?RVRO^;Wb1mqdlYdqs(lldYuw!!)h3SnQ3`P25Xr~CfB#4?cxV{J z$eUji@*tp1lzzcErzUe@?*}^ICcm$=@_yE}VKk7~462!y3<~npS6@*RU=4WNYm+TU zPLd4SKr(v>5Iii4Tjm~EusXlqj%9j=NH|<;f2dTzx{=1rg8@C+LOqRO`tsgJ#7i;| zxiDS5ZZE1ZZ4p5c9CROBE;~~3O)nebv1zi0ZuCv)RMID9Wp$4lYQjg`Q%$b>X$I$Q zVl3DM4|ES>s`N_KG&TGC;tv~-I*KM<)|WC~x;18Hxm58!RDVWfrwMV{nx93NV{e+ol5|?Jm&cv~u^Z z=bGdAdWBzd;&`AIq6W?s^Hrv7Q=UE`%4}pb9b)JE%c#@^iM1otVsC8zRYKA2(==?_ zc6mcX!{==*Dzqn~FJPKXvCZ1s&M~6u4%`m}CmJrViN+d_j65%H0Ys#`afw0sS)kX$ zznq1YwZ+J_$ z0)2&};MhJ3Z`=me6;dbyVY#!PDXo3K=jN`jzj(1OT(70g_}tvW!pzKKo<|E=o}ZtX zxb-E}8t~RlanSf2Eq$EEx%iFQZD*VUTVlC^KIDG5;Xkr zroinb**UGMs;tln1pqAesGNd=R>lN~naF1sLvdFPZc-VkgOSnEV$#^8Xxq%(SIsUXInK&0I0zw-OE&)$Yk2{P(>jv{)PJ7*#(42tNaSmc*_hqN8u^Kl(y(r zVkVNfz>CG4uFDP13_rC~hx`_=Zk*8ibVo=z=QN`m^?DZ&NRo6h)&L3Yz z2;03Cps+|K{MKWfikC09TXdRpH>R^E|3oQD!qR^fR|tqS;nh#!eshg_G|p=HkW$5q zV`xWigkv)s|WrN&T(udHNwt95T@^+FpXe%pkhd%OUq0GAeBB`L4Mu(a!JL%lc_}aN4vP~9~ zZwfa|`C$5RoSj@l`B_ZZZO_fu2ZBp#Of)q13#~Nt-8qokWzTqj-K61&25OJ{PO~1S z*tHk$3Tkg={8g4yhQLgfoQ4a^oE`2LahZuzLBbY0bS=@=Gmo7+oxAp27YRUY@#)QnSBmcYqa zs#^HO?a;l6CLvX6esv9KEtSSW(6%;za&p@Pf&H=kZ)3s3kjCOm{Mv{XTt-(iBR#)% zG~dmnX8B~C(2^|Ub>kN)pKpVyyGghj6jU7Oe{4^0(x}S6Y~80w_BeXynjEG@dkbG( zz$9nA+p*0_WxVWZLxEdXf!-4gjU8YOQRg-obx4|%>oz{Xs;0~;|9qm$-7`-{gx6`N 
zs=E3|w72P4{#HR%`x4T0vA=`os1uqkmt5j@o||&J}d!*u~_b8X{rrX|yX?>RQd6lrEQY?@x(xvN_ES_t0h#ijhB_Gs?H*B(Ixtg6`j80q1GXds%KWlX;$23@|9_F zKmr-%KO-te{&8zu5-C6?Z{?_M&j~3#pgu~*;+|cjp}T>EBt-!Bu0{K%L%XSA|5*F_ zB3rZN8zT?cA5~#=S-G8Xv#{HAIa=hFo}ZtQTPqav@}n;4H?N1386+;|nmCU+P_0Q4~BW=nkLFiPkXxlo(W8=BDRYd7hOXGrc%C6|YkPa99#{6~N`e?z&QM>A871cXe6xMDL&;EvI0#U-JAYI>Aubn*t^9J9Favp@he~N)a!6pDpk8OK&wjdoFJjFW+Q&Plf z<95kkYw|ounX0lBa6f6!9%}&Zm!mt(LbFP>kt}@CUbDK&|7WyZE4Q~ zKM47Ij@wqhveGTzg`~fg0Y5N_3yt!FU%bvx3Sa)z#n4j70e_C4uVr~aIp6qmZ*7?@4Rlj{H(M_xVHeq0YJ@bQ*0&Fu ztA?*We8k%Hf_iFwzrnP?Zkyu-Q+&m%+V8i|V-}3jn7O-=_6FCKNcJtJ5s6J!~JRWQhI78o(>)qa=-Yy|{eif~LMj zH+|UN=K=g9_G*oJh@ z)HG;HSIQF$~A23OY_%y`Kg}BcgsexEscgj^m zgYkpKsJtmAjKp9|$!`Oaw{hy8`Qo>ge$lG|AhtHKQpUnxj6@Fvm7gIw3{XENoYF9a zK-M+WJg7xtVw1KX-y43kJ8Y5XdtgxUGSFtV*Vf+m#ZO*+ie;&aQ3k2l^h3>T)A_On z)0d?3IBRJi_&qkl5cGYG?}UG+ze+)ACt;*_0zq68> zSOBOALrX{fWJ6PXoIA$H`984Pd}dp}y0_&%UrzfkEfMn(9C`5{TqDk}v7_)pytXm8XiM)KSu(T+Fnzdo_8Ya~>M^TM zLV<;w4nOmZ;kv{ODtyYHU94}tW&jq5zR4|zH$0r&L`rHBCdv?=!u>@-ohewas99^g z!tZg+(lQP(fjjoxt|uc`=Z{$p5s5rIR@|u)V5j*HZ7;Dwxmjal%0TQ{;F(09)gXB0o@E3oTdX05b{iX0D-EUhrR}F^Fd}BCMwrP8y_YbN^?<~)u z($Zy9+XLNX$g#1>$uU?=4CNzl&LZK2v^NgXZYj(LSJgo`1L10n?--ezYBD9qkMsMs zeh~mMS$8R)>DblubbW!woP}AgysRujf_!-56FHz=@zqE105nKpp`29C+}w~Q?VEj$ z@f%d*i_@uDU^&rvnG}@`PV&!jvkz$@mtMKKh(h(#yZGgQ?+O%8YG*@Vt-TL_0HU-ZNW<5b}Re7W|9#ix##z933>A2vB}}tp_Su{ zFHA&;$GN$D7i(;qv!K8pRT^E|RB=jH7TPZH3VIN?p0{gqQ7C3Ud0f_>;%SyfrdO&T70)K__;628dB z7662i6u#Wa40LpE7dfk3jfWS@!HVIFPaFCH#%dGWE=m<3aSO~g@(yW~h$BjvwcZpK zSC`AA6xr?{Ay4=r5N!twBWNd#>41r=eAXL~=DukbOv%R3L#<-==Afg*WjfaQ5E z39B@7cOA~ifi3+CQ$Zv(P@rNDnwqX_(z~d(rsg6?r_4?{g1w`nmK2mRD0!zV?$6g9 z_+MCITT}t0VE=*@06rx1IpyoZQAn9^1IPZv*>Rtdan&`b_o6}kdX4|@(0ybfMNiI0u?UDWk3WDs4J#c zQ#`F(eRDvmlamaV4cD%`P<14SWK=jg#O>_27vwy$C?nHgL`6HE#iO)vle9M`Hn#f9 z4uG%$y?9FpwmtjZ0^k-*U$`AJHx4fBnej;8rR7K-QJ**7rh6uF+U@`l@Yam>G2juzhf0|tW|Ke*g*qPyGJ~FG zO&2tLlmY+}i8un{5YcCxCki|=QcuLZKCCw2deLPv_Fi1S1fl?&x@l$Q*j0o!hwfFz 
z&X96zYMOgyCed~jR1(G%ohUCW?Ya_g?_an(*>EjmzZ^m$FoivvdX_i)Rd3(+1I#xoaTq|vw=YX6MokP(^R*3bZkRf2eF$7|o?W-g3D%1kg|k9& z4N(9Ed!4O)akb{%3Xo&neWaz(t~$Bcyu!-d@~uCM5fF7X?#6-!QT+C^fVR>TtG6)* zJ}rf5Y3^-@;yY7KQ*T`SWRSmmdaU2i)Ip|yz+nQfJV$<{W5qJ5s{=UrIOp@S;H>&O zbEvCp!ql?X<#b3aQxY?M-Mkeyj_l@96>UxAMZvKy|MfVa26@ixa&^q*K@~I%qlqAY zGdVY*)^#}e=zi78yDqeZtcO9d^M%F1Oh1JKP6SS;vxQRU0a=+oN_XxzU06T`Aa-eJ zn9)Kt_J8^mMGoL&i8_3 z=#0b#2P=AtyQ->wKwi?{?GXRA)U-CNn70~1__a2fe*nsxUs@g59B9{T>GvZUdjVpb z)sQvhC2p+lbOzo2+pV@8cl#PvLs7}kKxEmj8KKx?{Y7EurZ?JvZ=Bpwm>937#5gXN zITpZ8iT$78QL{es69TGvq{O4Sla_W30Pb{*iNz+PWu#pIu6l{*_NzT@bs!}fva1l= zJ2Nyyh8yrO^`u%G$?+t(%8eQ&NdFW!U~8e;7l6dt7tQwUPqx!&YjCsg;dvD0n5R8>x>fvFShxXdh%qA^|_b zrtwV1zD{1Sp>s zT#dt{#{qb|c?EfuDch*IgTLge)|;E;Rl_ulR>nfS@*aVPB; z$R@M3txBISGkaduAl%~(P+2RT&mthvv9T$Mi9LFibFW#-Qy5a11$@A-85oRK3s1LN zVR!eyZJA~NSXi8ai;7%r-eLRXkiCX$0<~1uoSW++`JYQ!yZcm_&_4UvkB!-XSmTtf zLj>t~d0l^hH{-pHF+k6S^afa8!>%fD#}rGHV!+mXevdL~7hqNBX}Y^}T6EAT8KuLI%0s)zG&fEiN5;o< zf`uNU&IU+?Z3BQ(&RWfJ%7g|3i7ytrWG@ipZmu1rquvx?f68FkctRo^{#u>B2Mn#- zh^7c6rJXpWb>^E*+^QcQer(DKo0x;m)Me_2&kwS=+KR1!*ft2Cx%NmO!-X0+s9WTmFQGw4tXdH$C9eJO;?apHnV>xt;TPtp{JENd-q@HWW@4&r!j z-GejhryR0?!Zp)3COS4SZE5#w8QC8;a2de%gb)>|m47|=`%~szb58QkZEV>_vUG7` zUs#D2;k1JSK-4b1k{do%U;L_GO+}Z~vp+R;Y?XIeHPtu{yLnkTNl6~EN_BZpOEg

qBZs8&9{L%=%tJtTWO|ue4T$QT*QlzQj2^)*ygD z$-9NA4hvky4c^+&3X_1j?QC_Y);Jf{Ux^|Ig;ERIjpUs8k}D7mXB$Shgp^k;Si2Xn zNPEp}Y+dz}KOMI6g&rO|i5Ok-q|YCurZ?bF0Ym|Eb~cx;M4Zgo?B+_`UURN~Ib!H* z`LC~YaL&!KhCLWHPi5n!12w_eCsD$VgHmZgS_r7MUQ{vUw9RFiewOU^%TD=hnq9Ac zZcR+XK^*4a+R{a)QER=pnVTz@%n`Ad(IPn?nH6m9nzx0UtST0PDuC@5g|)8lzocy!hANQ~qK^;fI&JmoS7G z0xyfMghpD0QM3M!+kQNx{)Qx(XzSs`MJan-pf(4=>vsL07h^t%#uuP8{pj>HB$gCt zpV}F)2DBm1bwZ&fUuOpwfJ>eO`+5tdl#NLIZcsBm+EGf{HYhU5UJ3SZu}(CG`{Fa4 z$8T?K%{#Yj9*d^`kf44QOkhCA+Mhht0_IH`C@lPTEY=Qa;7rYOZ*^AdM8~yw_~` zNQ8YSi?))+7t*pUotsOB!#lOIjYf&1NYtx!h5!MECvaKL5SN#g=#{JiUNQh_TFxGf z(OUi)pMnDXIA2`7oJQ-;MVaBss!KnA2tI&EUXX!0(1nnD&XLj86P&xSC<28j^-x~) zn`HOilf(vxMR5hf&2I(AFT@CFTw~hXIV3>35na;sIDXzI@0_))YNk^^0`@tXh*iuX zt$Ot7CRKZ=NUMspMm)Rg4d7>aUQZ%;VxB(m!q25sFulKfHz*lJ3-LSxxM)BD2Y{CYS>rU_L%008Gm#3Ki^HW5ZA(~$V4pai)J~{-8cXNG zM-W`uM>wSu4I26oz&ym#e*m`oX_q{Y=S?Ug_RGhQYG2$w$0Rx6>)}AZ-OyPjfGAC23M__C zgruJrS=G(Z&hfw5(?e(^H2}bZUdb=cUdZeA5qtOcKm}xWKifl8&7o#E^m3n%_0)Ly zPVzbbMktW6w@aQ(7oV6jje_hqic2j|y0X6hNM~qPi@j$ys9jU*PG;_MJtnu)z`$F} z%8M(nUwAxijJPc2uMANn2H8!HX6O4X0h+MODON+Ca4y+4j-q$c+%cOQ8|9brcYq)n zuLn1?M4j1K*X*v%X;lPVysLc%G#*SGX8uTx8QZ($<|`rC@JMq_kLgNTXz8P=> z*?vbI{jT30QXZ}_fjMnL8G9`pVN=@zpUNQw4*}iTS|(14`#Dk@gi$Ay`ZG62Gyso- zle0OLab9j>jxDHW;8Hi>x=z!u5_F^y0&Tozs#dLsbQ@6FjG%Gxe)I@Utm)i|KZDqi}M*a>N{JXm<8Z zup-6adBWUH>#_~b3s{*MN{Z(Bl&21!j3~~h<`hZ* zyW=&08)_S>H5g76?fyteIyWGSYAk3Y^(9!Yz|tpKj7g9|!{*?^`=%ZXl|5pUZKI@h<`=+Bo5R9` zQPb-!8_(@h-9sPg??Tf!jIgaOB;|Mbx)kU=5*4))BPR7Q6=0jE6M;}-fRkgynGZNv z7wf-T&>urdmD18E9(kixMY)QP5-N{l&QAani-xXEM@T68gcY>l@sN@ZnbP6=MCgaa zZ;sYWdpKHINU_tp)ZNQsiI;T z@If?Nn!op%nLb;)#Bdj)59>@jy`;nBZ9lr5J_4G;#4?>EF~>)k8_VO`E!uNvDsfgc zg@dMLPx|D6g3ZmNtLqy0kuh+xAhFl7@b|`lKBX|n>&CUV?vr*wLp7PIqOYdOeYlz_ z(D);P`kKFC?4u_>K*KCah`3i-5_jz44zQ|`uVon;txnd4gaLQ%t#;!ZcB-<>Ow_uS zU#no==NW#89`NYj*ZjL;`keLPwCS@av6>39und3yJM}C*GxLITDzHNUw;}c8FNO(a 
zP20>mth3kLKvrLWZVnpW=)c5H|4m#y+25+ZdHA!6fsEmp{WC3In@H^Cu?+E8DWf(tjR|WkBr#);sd!}H_&{>MS_5wghm{Fk&muZzat{9h@uM=Ur5;Gg-DuM=zRbISgNW7c5{Ich{1#rmB44W#*)IEnV#NIzLi$E z$%mMuz^tTN;>CkK5yChAhmU1Zo8iNNE5f7W0U?`-lildij;8*Oa+D~-io=K#E8fE=>PVDZYBdOPO+zS z3rJOvmXt)=hv|ZoiSFOK1&q)PPuVJS+^rfnff$IY9aO(Jx$CK{qV=kmZAr*y>KneI zGAA#u(fP14ucLWUVGWyJ`!7JrCJX0w9(^irIejE1F3#I!?O+#-&xQ>h%Xg*{4;|x( zjxz)l`v%Lg4+KXz`Z;g}@mV{u+tzBuKu;bFc|;d(2F-2MT3B}NXgwEY^_&O{?y-q6 z*gft3cQL2kx#YdXK+b%PVF|O)A6SG=QXaAA;s$(? zn+s$OKGnOZ?|yl6^5LiL)WdkRfek;-G7cx@nWW_H3?-{ND_M>+n3~d<^H@E8xR}jX zX%%D)4+}fisBGkS=sP<nm~I0>=K?sBMGh_F6lHn{ovnjqo=%#(AU>*Jo;l~ASov&=e*Iq3J9lU zkid2F1pzoU1;K(Vunq+Eb|<3eaeUa&&a8w%fk4Z(Lgd-q`4<1nfah&b-TBn5TIx zXtK=FNL3Y$6~qcynF?0~Zs`6xl2)o$S}mMil23>GQ+ZQkstUf#_&UbVh(EDJtpO2a zi2~+hOF>A0;M5g)^+TO~HJpi#&*lifYoC8SJL!-lcW3E-QdPd%-=0h2v(Kog2m<5^i>#eM-mI&* zH^m95;XnVo0JGG|=lddL`mn_>WEJ<>HpKei7h^Ae#5a<83m1C|-*7U9_clj5ryLSs zN=A^7y{v)3sDT~CNQcH()j$og(zluphgL}Fo#(+#ed!!!)GHUY0N8}_^A$hgamhM>Jbnh_wU z+#oTDAmgppx%rd7McV!|xTn|O0_?9#ivax9a9==@QmwNeQ(!6loa1h5HT$$KSnbT+ zQg%*kJG3>+SV6zqEpRtAA(;{!_z`^9t!qPzVE>88EUiMi%j?aicz)BJ;SL6t5PAa^ zF-@;#4H9ijv)sBCj@*Aldn}Z%k+CiR$JAHHMfJSjqkw{R2_gz2-OU05QqmyZ-3`($ zh#)Q9sem9IONS!464J1AEsb=;Z@~BW^ZN1Ea(DOcy)$!Xp68tNoar(&tQuAT#q(-; ztjNs8Tqf-Z`OwbX)O;X%LVF>vBB4ae>DAP=1QgDrW2z6uM5JSd&Rjby)77H;sD4$` z@`F@@B?SFA z%>Ysx@~vE@FH1;-6NIqA=@-g#Lf$9Hw*Jds-bC_wZ5hhT z6MaYq456p^8pAIhL1Mc#Lqq=fey#Or+rxt@2INe$1Vk zY6+T5k7inLuM!F8u}sbq@nKJ2h`>~p)##WSsJbG}Ij!M#KfW)6p2syf>Ulg(p9GJ@ z^LlqwbtCM3a*tfnbsNWLC!@hvqq%OLVQJPbLZ1D>hxPng-0m+_D57Z6Xt=Q?`>|~> z;pW&R;`=WSk?zSk@RZiM>m-IVO%^fQWq9r0RB=Kn+ejt>;C${2K;Npysw(#cGcJpP zX2f6JPk~KO$XLp}(~7Kjx_T~Kj+_QP4~lFzT0f#jUQXWhv5zs z=XV0DwNfhaVn^F|OKNh|cQ^N5VK|3JD(JINv*LV&w@UVm0No2J($L7~-2G|yOXwcu z5aRt8s*$C&P#|w{OU^WTwgWtsMgh{(=rFvB6+Z+}&Ff(ceb<$wCh3B{J|N{is$??x z&))%w>_ZC#`8S{cUY>=7w5`pDWk?j2G)#j@ugWb?M@z$T`~gOki7fvAg;$WrzI&vG zqG-f*Rw=(`M%`>T`u*J+%e8mX5!2YeG8ld1-`v&P1GwcgIPq4Fslmdu8(!7Zy;#MW ze_>?#og^8t>OKj6zz31h3gSgHmxxq_T$LgIHc{*F9>1{K2`MsDVPNp4Mk1u 
z$B@(Qw;-u?6rWVLHk4j#jrTiZ{MFoTP&*ZwKV3us zwz9D6dQ7iIC_$JP^{Z_bQSSU@<)xF{-O1OqLz3@Bb?kGbMA+#_p^DKirsI&R#t_3~ z^M-3`8u3HbWzZ3UbYWQsn3QPKm2x5!;4z&oh#?6Up-(V-Lhf+)gZ4}FVTO33WL>N2 z22N+LzFsHtGz5WdF%N^=CDAIFoCn*&)(ZwOKT>{q6T!DT9qudS42DQRu>wZidy(8fvSU9#(?^>N-+MEoUeYIdkaHr6 z8Q>k=6`Hf1k(+3EBujp+{!=6CHI44u6BQ+PilU#kH97M$U!wgKzCQEMrPK4Dk#7%< zOy}Kg9vafO=u0-!UTr$tMg6dF@9H7G%t7uM#$(@Psp{TXpMB|q#P~#3{a3AS?tK$V zOC}YshAR`9^c+_sv{F}fptgIZsToj=b%Wc%V?zF7gbPcikjnL2Am*W|v51ip&qc?< zt7jpo_X98nT1{6rRt8%Csya7W*^DOUujXqZbZ=MX@8%&?JpjskmoYGVbM^jV;{Zp-L(X^HahYujekG4dfW#Jm9(kQ>2 z5ewmH+bLiWw0tYUm0Ihve*ac&KE`FYIo1c{lFwd!GwvBeO%1lzbgcXInyf2I!X$pE zAk*(Bn+ro&I=ghNg@KPJWs0r9Ze12PrE(1W^TM}m7(A1~G-&Pxgzr$#9J=(p?sj!2TQ~SRvt_|;2!b;_-2^57d#5q!4KU0Cck9z!r zGU!vT6n%|dnzOxDVr}9O8>WQP-dUi6h=etX7aOj%wRN{SXHij)^3J82u5PZ?b!VrS z(zH(Pw9X2igB1%sy=+w!*Vx*@g99lTaw2h?k7O2unkuR%dMGKB@<)V z#suKvGF#_AfNtN>@DimXA>}i+ool&T+%wl3N0oty5#K@=?@cnSQjyPbQUx!1H4J=> z`Wg~6^qevV5YEZNqf3D%8j^4dK9mPD3Bs*X~r$im5EauxxB@=Aua#S}zF`5JLN4^-Tt!fpE z_@tv$w(GBjv{^i^y@j-jd@Ca==JAP%g+)^nBe)LsxmobyBjIv@j;#OUZVMVTt*ogl z7a!&lwe{Qo!i-mzrY zAXy;^M={@X;kJTbn|@qZpf0{k-p6$Z7Mv4q&-x@_SyU4SrBzJT*491_5x(BQ_(DwDSOh2+O4_8vEf_uTQ?N$t4`umz-ZQSFq-l$gV(lw!Gm|T2utL+Z=w# zh53P9l28H;RZ}C7_``MSe8A_ZlFArAPWOh_6A5bk7Mm2wPWQQMZ<_RFzUuu9w1 z{EG_0CT!`QkQ9bTvH$w2`WWjoT26Wu^44#@0T`pDmDSEX_j819ER~EEL&_^hU97Hf zY_nRjE_&A81Op!WYOFGas z+@eXWFS3qc-+g`}vUrrAXqHkjP0NFe#?eW|JG+ujkigb?dHvqqR6^;~>*@NT*%XCi z{R6rtv)cg#G2LdJz$Ux8??Q*PnhvYf)v8~KShtvQykf4*oq**BQ7?APsivJWK&n2k(yhXhc32_@yi<*I=ju~@3wnjg}%qP9Yci{_Co74Ti;(y=E zmdv(1zW?vF;5~^w^M5bb(7%f5AY%^Jcs#stzx~qdlNFkS&x0R6+y-(;*rYGS6N%Ed zZa?e==}cbI)DwIJ$&^B)oIm}^mqeu%OL(t}`0m|T?T#d34y0>zrtcRZ9qbsS;AnQp zOK%KPr{7pX*rHFVZa)TswEr(y{2%t`g%TzzI;e-IQ9xFJVes!(`7t=!2C+P23anCt zjT{5W-%-e*EJC76H$&b!O}Rar+Pu4)wwTvStXK_CMBgcWUxLscZu5R!6sN?6aWwlB z(}l(FTn&XZh%4F3j&gK1Nj6{5=Rr+~9}_C1LOv?T#NKSpxdlOy@heX}>_Qlr=olCn zu1DIYaI&{)@W|f++ETUgcOiXkq zXY`l@qo|9^hzeVtcD~x}>HZ$m#lQm#Vxq)X=i_sTj{TwaYO@rfthvhK95Kms|JkkH 
zlC+bp!aUV#JMhH8DnU9-6UUCJ2isfs;vshClvLqXQ*+g8JxYXgkMbhAPP7uG3)grG zIV-)?6HQy^6w%n_In(++<%-k&QBA;2m7^gs@$-L8*im{dan~>|Vr$5lGm&Om8v1_Q zNv{WjfT zwzJ8)*}(qQc&%8ASXE=!5L>6t!;d4TZhwFC((fjmRd{@2fE~iTy zjh+Iq=52^nw#7bBC4eiT6i?MvA+V1}8RXdSlR8()+~tIQrb62wv`cLZuV2ZBYm{mq zA95;gmBO3tF2?%Ocq2&JSFd;b{v$zk{@a^3#?mf4 z>-MQNY;BtKe@LWz>f@(3;C4yfG-{o1Wt^dX@}5$2aKA9jn1C3K))~hQe zAgSznEFrBHADK3GHD?l?uPkl)j}72|cHeN#9v~)tSO9aT5ud2CIH}#aoGo@{i5IW0 z$Z4smuP>~RK??+OAuDCd^XZkwd;V<&XCzAz2k1*Qpnf!gy@31AVnWqBvaSp*JA6?e zA%B{*y^9YEZ+~(1SyG)(*|6GASI_R^{L)F48;&)b#L(rl2(|+DC4l?Xh7y6GEHw?E znfVCIRTT(pDAM|X;~Z(Q5j=5j*ZsH*K0+tZ_Yrj~`1?DygZpFMI+9Gmff>L?XChVPU}IYP?t{g%}2J zwy6C|*JCP5*kQ>Pt=zzCCySz`9-@lbdvH$5Cjnu<*H#s3jete!aDptn4e+R>Otd zv%N$QX)EyW<*0_i)-WO{@bc%jKoL~ku5`90jveaNC!?mMQKp3#%}Y#3$gIibu+nSh zS#i10F1ZoB_a_hssCez|wX58y5%zGSev=BHE+}`FFj*8@;NQclo6kh9f3w5-ba*b8 zdb*0Bc6}R@pxx$i9upE?UnCIoYS^bk7O3P$Grd;#M!#zq-mTVc#6L+5A(~%}yUSNS zno4-r@jCd%xeRDBjDL$DO=CIs6GW3F1xXSDCa|i+n`WQ>Bke5)qwERL4hn3NtxG}+$ z0iHnJ&gLUzz?1feD~oF780X{9K#H%NRy~5*bL8br;q0`yXi%&daq>Ee2~SxNu}lw< zf2i_Hpqr)l!~&P9;~Je-C5={#wa}MjT`imN+LZitKY9uef)bns-rG5IaJa6Sr}7yN z3k2cmi3-H6&zPXGji2NGg=qYE1Y}m-93s5G_)cOud}zs)1SV$TUuRs&}HA}=$zYPo)xOq*zrE= zIyPQ~p2i(qZJkYC!O8zt8Bp$6AZ-a>sK|f;wOBh0*!T^zcwK#70Ap=4@M;TDn} zB`_4WvSzKA{tvlXyQTsp;~IW4;trX1j5ng@b#X{uxcgNkZzUA8L1sMBnY{Ynj5rhj zhxU|8vV(P=x*c>PV7gI$RLYeFq!nD*>)R|g_j%()H^viJrQS}rNKtzDAUL^V>!g)S z<|x0RfI%fWKooODE+X%!W*uTInpcK8;W%(76?%*vO!RB13x z`F`?vxYs}pfK$w12qHy}6#WiMdv8JzW++lCidJDKK==rUl+9!I@`RLh!~1B%c{RfM zA@(<_AdVf|>GIN?iYR6F+5$6qpimz9#9pXsycdhA^PNFL>9;v&JigkfE&Lr>CrO-{ zRxDMQlZzl@SS%A`q8WeG%P@Sv`O0ajV-7>W>VF?^*7F2ZJG~u!a(iR*Bte*LF1+c^ zYHHNd)>e^E^ILooJjd5*6_&r*em6B;a`q>mEy@n0=39}Rm*>0$v&YTP*35nwE5vHQ z8chA^2b}p?2?_;CK27BYbJrk^rX!myH?s5f)f4esm|$PBaUU<9D>om^ej$O{(YR?n zynfid@Wp*=8v1UOLloWG{;q+^V4v>=+jq;hh2=_F?c97XfF}aGSyqH4OPxG+PJCjmNOk}yXzC9ETpF(p5#w{*O(g;#dPQR^sTrSfWaPf@lB(5;?-7)T<8>T2kP6~>&*>ExI zgzXk8!Zj1M-oEwdpCnG_Gg0@{+HLW@01|z^T{r?o1XsOS_jtpGbt+NN`1G44mvw4z 
zF5Mr?-&}pD;j3ku6~*=SlR6~2nr|bl{+VcZK~UNT?f&>4>mSeON0A)>CRf}rdpG`@k>yKLE=pRp2xhrjM9{d`!52`bk00K zo}HnV_4&rI?>q~02Ehq^4s(gq?)w-iRa_4no9|RTfo)Gr^vyMKgOUZ#_!1Q8Wmu+N zF+{?Dyk?8*^9aF5S@gTD4n>-`qzEE;16}%oig?9ZmYtVZ{~>%Ff{3M3l2Ped{u7!! zFC|7~xuy<5)jxQk&1=Q+N33;$QH>lh*SfTW=S*@a59{w_&-pD8WM zgfxSnt9$!^BApV35ryBqLINOn{M}QF1VC1p|YM!(!1f6ub2fG++D`Qg1Mh7cB``*%DT5*btR zz2GInbF$l3Jamx?Yx^PkfFw#bUIsFAcgQiqslVa@3HJT_hLC>8elx8^Dx$kp^fgWw z32z^h`2*3e9JI$|(P%G{k#E7~!C@sCv={dHBuZE=XdR@6;1Kv*L#JzDynE=8-e)5H z56j21)W89qcY29`w(JbWDJ>Fu5j3QDIW9g!P5+R$SJB!(2)g~@*{C0mH{{D*#sl$N zP$p@px)FbWN$@ATal0cOB41JlG7K= zon)ZmRwvPHg@3`zpYy55nu^jP zbd}=SO*(<+UgI8vUF}UlCJR)6!%p@J$=$OZ z99!6Ar5OQ%rjp1W&ht}gCnG0@jPq+>92$J)?_|O7Q`6E^)oPd+3NATCvZ%z(n7FrJYPA-H<6wrR{$d z4$Mks{VaBflJuNsxOq#pYBz#69a3ETQ91h|7R!JC=rHh_RcWy*{np&kn%xB%G9*e& zLRw|iyXD}$nN}K)Lw3Ar)e{*Bt;F4igUrjzKrltyqz0>l4IPjib)8vzwmv*!0dr-k zg}^nwewguG(<{i1(IYMw#Rzr@rLM>fO?72`RaGvx!&4YbGXs7kNwCvKp@kfu)l_$k z$(VyKqx4WxtINT}9|HTPy)p6Jyaw|ECdMceoM-$^?qw}cc}vJ&S$3c7-Z(iqNpqxN ze*8JCAXGBa*Gn4w{L#mPbQc+>#H>DZaiwZIo3+q(NlbDxIFv3d-z+gPQ9k3H3=jVc<;@{1*@~BthXcYF&E*q_j$o&^tEkKhGzW)(yWGD!@wl*bk>}$k9lo-y1 zMe9T1%XYVO9AkwwQ{~&|SjvfbF<%bmLROGdD3qAQ>+yFmKi^Y@M$yhH6x8r;o|pL_ z9E7aCy3BBxo14PFIr!3g)H1P@t^=*{^&Z{5IfX^9`u}a5yWTzBiz8-EJrTyaQU4li zZ|Z+?VL%mIQtNvJLX^z>GryY<0~-927{_Is+Sw1d@=J#!a z(c6c-}3?prIef?|Sli%y3-*J|< zdi<}qOKK6b$D@t6b=VA3`$V!ge+vxMR554jtlEK+V7CzwB9!HSvXh!6xIeGs(CT-U zWS`2jeR!KO$qzT2`vVI3eP$pAws1GKlsLum@EHT8qvU+&LgYs@Ro8ZyVpNC)YCraF zcgCi3C%PjEmCyJ4GnfNL+r(WAVffKqaXr7*3;N3KWYyzGZPd?w{Ic@xChB%d-1S?} zT3fD{vR;*c&*vwNWbbTR=Zd9D0JKS^tBRP37#r^23&6=FkTakDo%-&O44N+?Ax$!p zL;!$M8C;$WM1=xGx-E!V0oD}Xj!BFDTPOqIW5QYI7aKL-{SIKlIOP2MKQUNqY!C8v z%Dk5gT3Z$?ZL&bV1+Xt-RxQWT1cL^bVB1MG{+G-d(jadvDta8*Nhoo)?|P%AqjPZMcNVwncM*X1@{Pkpwf?}y!dp#3iQn(R zvI%)q*H-vi4WBa5gRO$h31xU$Yv|f510G0X-aFWy+%1h#s^Z+(#uTvi6vyIzOfru+ z#kC2|)M016nBl5FX!{8&-Fss0x3H*m;tD7-Q7Aw$a&V{YKMTgP3C@Hp?#q_VRuw>@ 
zQ1j^t^oCL#GPY9yl-XDp703(T4inN~mZ9chO0rchu92jKq!2M9cqL~_wc2v$m$gqi4)rD%$0dMO-Z%Cvbg3-$Q+)Nj)>bE$3J!H2-O+CKb9Ft6DpXNq zWMwp{_W04@`dy<$U&v(*(5z-!nXboYMY#>AMV1T++D)eS5KYC!<5}02X=;o~p!E4u zF3I1A&Gf^86y24VAhh>GH1oE4c7dsDUsl-XuQ#>{=Kw!130i3G$k5c~h4AG&fN)h; zLx1~T%_W+#cNs;X2LAYnfhf^!UJM}j)fn~>l80?*Nhnb({hY%GgZaz*M_e+nODY*` zj+7o0+8-OSO`#r~x%GVs90G2@d=-Zq=~#azKZRTQQj0fk3p=#iZOeF-oECTGGd6sE zs zbP((D07p!V%qvN~8Ti)$72@xOlq3bLbbnN6SZgdQ6Q+zBv_S@h{DK`*9sDt5j(T;)l}2q0ewlF{pz0 zpe;rudESzWj6#WpjYd0o2$>-4_>5?`~q<;1Br}X7-|uui5LJjHeh}@T)}vciJ&tZa&@A z&$t*mT<2_c9d8bQ>|bqrrYsX$YN1lNJQPj5TRdA~zRl3@q2he-3!OH@WoX1A>jv@E zq2;{ShBd=~N8-anwY6ZDFlCMu20FTH5O!ccd3D(8fBXUTsZX_`{`QVu?c8yR2y)0o zqMBZ_N!xo7_nEV1y+$`b$c=BOCQTX$Y@vlu`SZT=&2n8;KbdRUYyn(Nsebcf*1)Uh zXei{@>t#$Zubq1T7Jz&CZ?V>l6X_4i<4|2k3?}2Dj=|8T` z?>1L8nG}b8pmww>J*i zpC7NE?6(?7upjUGx%-l&3rL**GAr=Tb2q8oB3mQUu=t?jy?FZ z3%dQ5?rUL-hinxwk!-H1c79uZZLE5tS^vjra#T?lJqr+N{V6yjTP1m8ZwTy}`#hLo zT$o~NYH;Y)_4_%5Vt(vw1dx9iS2US&Si#YMb_4NK|wRmq;dD6SM6Nko^kN%=E) z!5J`Kqx@q{5=^J8c7%Ev0I)78`?g9d@0lD3B1TGl{ui5TXL9LNoIX!TTA$G2+#JpW zv0rw7ZL|{YbkZ?W^FN9%0MQA3pLtc(5+NZWM<<}hWeW4Hp(dLRU%apU_@g8E(6g}uWnji1MrAeZa==H$Ilo17;@P_@nRi!g-M>XGE$>t_fz-&e;)w;k$fl2L^`<((`n%CKBBt({8KAKS=s z%rF6Md;hW>mz~`##7D$ZCl$C*{I;UhT3q z4Tgjv*O@D92=9a4VmmE$h71n*D#ff8;tc`||RlPJk8Fw-{Wa zkE)cuo%O^0jhDZpXZ3pP=O?18u8`#Fs1cW zCXd<;8y8WoKA)+@$)U=qp^gcQcVwX&cC#0AvyU5If=& zi%OQHLf`Cf2v_?F+@hstdV6M*Qm5q!8SVe9-!zHCgHN6r zU0lI4cV>pKEw>+5v5L+LFvt%Be64Bs!X1OEcH6H^X6pG-?C5su&%;>wtidPfeZ$$% zH07zGAr24}g5ZI!H$Zvqwl7fXI%Ft+{9J5$yEEt0pG+4jkF~d8!Z21cG0|wi=bWCJ z;T;@Z_-#flN#-j~`a7zhvwS~^6?PM(;5qMKc}dNt@HV-^o-~_hjh@utSUV&Odc58p zOyg~tX;u9D!y&?6I@#H;cB4(qG(u8ThN&3sev(9kmou!bCFkwy)rhjWDgH`IhESKu zhSi@w%#YQ{LcTwm(aU-!%;Z0s??JP9s=D`^K2OJuhS8nM@1pG&-Us8>%ZuQ>_JdAr zn2K6M34b4im3bd)i{RYDj=;pnRHFEuWNvYH8U12#(O8(@Ler;87rQuya5SC;w3xq> zfjb*gC>~k-tHdq80#0t43Tj*D(k$$g8;EuNq5g(l(d(;0^21!JY7&P)>FF51vZ&U1 zbuVk6Kt7%Se208?XgVG30kXICMsOtQWoxTvkvSr>%olOqb^Uis-d*3h%k`v|tENau 
z?ex;?*{qLA2zp|l`?j>hZ06;{_7~NU`mXk^#IqCiIh7q;+b^!##U4s@VMm_%?yqj6 zd$rxKixpogIj%1_I{)+Ri*}s3pK9~*&RLbkar5fsJ;pKKmg66#jrL^uQilNsW!7Pv z!Z)*Z=QM((>TV-nyRcz5Fisu6>+z#jPs5+dFFYI5Dse8YM<-Qa?INC2r@5+Ax2Gja z@k7LzJuSj-z4`GgF!_Af-Ti6~BVpkBe1x;q-}yMj_e~aw_u`5j&J5pFc{)zwNZ;9v zwM2fMX28pU+;m7Adw-;!Rl)Cuz5TX3XU~TBI!7;C>BcUCDqjzloz=k0>J^o~GCzgR z_zr&CG=X z6%;nOp3g9G@OSJ{B-iu4s39_w5Nf3{@I!<|y|jfVH0-C5>wDU_!Yqyiy|vGr9d^is z*m%kn*Ax{UW;GK$t|K$f*terX5^cX(XYfPUqBet3eD>!IiW{~!!d_faie!DIQ%#hj zql+ewF;LBJZmsJ#+EL}vEoqAhNSFqX5fYLl@{3;;&)V7eh*Q(Fzw>NM(I|UcKXtQ( zOMLolDZTS;ni@@XyMHFwXlUSO2i!Q@M6%JDyS{&}lyn;_y6QxAiL0L3CT}UZwZLV< ziS^HWDa!i=7CgQyu>och*B1Kl=a0If2OU@xbrjY4ee!AAsXACngyJ%fi!CSgJ|~GeS?@U{x?wpCu2E z#p?4KbKL)8-Nt&vuqS-h>o((vLYe&7xsF0spO_b7Y^O7a7fOp-M-aKEU@U@_CT;MwK^dRkZM{?M7bUJUqZ(GoC`5#PcgMhKT)oQo;Z!1I5fR$D5w&fG8JXw;8fEo@vq%)D2{+8?M?u(L&{^w%-C4Wv zWZHoZfi&kAfg&cqqle)Za`Y?dt)DTBYtnW=H^Ju4dxjd`G*esot+ae3FSBddDD|X` zX10#l6tGrtmDlP|9N~RJ&RphT%YHkTK>HF)?S@lmv6;l z&;p5(lZjnJZQt%PfhQWJ#=iw|XHxL7%$u5KWB7HJZ+B#c=S5n`-+a)X**eyn$YgQ7 z$|)o7!w9bTr`r`@o&O}iL7>Y>vyn+b^TEy&ru1>W4Mf79gy(G{Y47K}XOgX#4M#q{ zr(j_>Y#Jtq>=%j~z@Ecx6@0&vN~y~zG@b~%SF1gSx)$+yfvgUvTrO6=U3qLp?B5evUWWo&xmsf!q@xy>X3!;JVF7H}zp zK7_S<@SAvkk3trr-<%~sgfXMXv9rLq){;21y4ajg8Xw}tS;^v}fWR_L=N^Xd`vo+h zZEql|C16!iv+I=}wzhHS=HX6EI^w_B!btLKdK*IolRM7Kj>{ zbSkJa6o9smBzRn>dz(HfJP&Sc-p(b?AQNi}=UIxt-{2wRA(Z%o<-2&=ql=r!Y7mAk z2HNm1eT1`COe*Q3yU%(+qzyxR3`O3EQ@Ruv5d23-8ZS&AA>%=oJU_Cp_Q%i8#R%c8 zenhUsB}8HIXe6K{{H5xi79PJ0efAu?dp5#2i-sTjCS7a=Y8UeHa;Oo*y=-P$S$mD( z(>&xAetbOzTL`RD6PKV_T{=vZ>%{U;`Jz7S-lgSE&7~0i{6GhBRJ&Q|vV@w0YE-+} zy-9C-rd+h2v+NHqmD%Q9{eOQz7D4V0Xa7}FTf{(=5JbF$o=Tf zYB-8nuKS}qWnPM^dEY)(s=-n^pS>VM*JRqL3t6L`f0un@I)$!bmL?ADs5!)?{pY|~ zb>)ef^jXU`)HfZ9!r&C$7U5e#?WEEh)rK0gui6!LQ<=u^DCo?a{Pv*k<8%#vMo zEB5)w^;T8;>GE}!m*s9Fu}7m5*nkn;c{4{~F@ESZMH>mI+5x!JBk^+P@8v)qxW#yR z{hA?xOU|VGvk?%mbtGj)zBzb+Rg8-Qppai-VL9RyWlhJ6p<=;Ui=ta-28`22^@;$5iGpxd59i^?r-zn%b^(M(rP@{d#ZRi-c1u^%zZW&s 
zSj&o;_?GR@mXt8}xG==YckS=jwMi&x*O*JlZaEdi(e0EY}8^8t}of?3Dz0b>~0=avdzjk~0-&J=zOE;IY`c9Gj6 z#zW)#RhX1jU)|jiv|$we$$!6Fmm>D5^>Z6YuECn_9k;dn7AH;Ng>v?`f3ombu8#jtusCt zaORtiI$?*mlv+(Q4t)`^VQq#O-qjtoN>&A?&6BT%Lj5UbTHCu+LEpkU;R%0kR9A|Q zd4+5CJaJwu-qpTA^s~_(yC~|%UrcI^k;yNEmH^6OhOoT4yH|j+e*2%24^)Z0rsw>3 zbPbOVzs=DHaMhf_d3V0hf*DJYhodMYuSJC|AUhDghkshg{(aWwKU;w8g0}xL$asSf z{;Nv!LP1x_UZ{H+mPkylcu^dRdSaQva+!Mcxc-%kL5gk{A`|#= zbzwn<+Xg1AQZ(l;w}!tb*hDeW3{Q0Wu}qM%H3`@&Q~wGf{K~ImWn5_gufzWSz8(Bn z`NypN>>G{N+s6hF=yKogW6eBz0ZeOTS?l)~F<9fM&Z5fZe6P3-{#0qTvyqtO(xv7X zZcYO6sIJpl&2IC}*akOa2&ND_F@74*ss!QcjY|MYLsxi8<2lydjl3?1xfL&D$uUVwd>~Ntbs0)%?;k$^}um@t@!W#P&5cW(EIEJ zD;sN#&HoT9Z8?Vx2;U4uKlTA+ghgKiU4}yQ)@}DZ z{Dm6uPTBZldw#Ubo8U;aKrzj@So>HNsRv@Txl;BbyzP)UD$k|-BVlh-1t+B*4f|7T zhK$S3?YZNNE7r@+CvYfA!1tmJ$nRa8w@Q^vn_y=_L7>J4)j6b`=F^b|Eh}H5g@EG? zDuOKTP7TZ-NT6?s`Y7@<)gY_f_E1!wzZpvcY0T#(lc1NT0sWM>%6|SQ7O%*na06_o znikysPdmh%uPugGj{wE@CX)1N_$m;|CnOxn6sS~KMktHY(=#ymZSDGpcI;nYN6*X(zjVKHekm~JO@ClZ&0;1_6_6A6IqXGy>XMU8^c!KS|0##eIb8lfIHq$96(WraL%tlL?l3Yfowc##uU>AHE4woHQ>N7g##6pK!Qb z1=s9g$F2Gw8tF4R{Q9c|N*h_}>9@xnUaP3Q%rjjnBAT>aUAw%SFC5d~6)+y$OZoY} zgzYEr%Cct*IScfIRUf*Hkz-fuo#Kfy4Rcfm7k5W3eG>vYhAaJXCy~#5B^t4f)sIuc zFZ6Ka(AXnvyFOs_d+O_p05=MRp0uBvzjQ2JUZE-ec+dad7^dWV~;CM2Cal2DvTZSP{l!tY{)+Fe)hB9U>R$xEFh^Q{`SE-t}f2!0gFT@l6hTlBeq z(bP_mUl8?703BF3`J!fiFbt-1X06uR!kp5@u17*kk#|8^3lq%AwiA-Kh-iJhUM!=5 zcB`+;wxA4b3;k??N=Wlad_H0&X_EeC$!W zosLG0ubA5I3))xLOOi(}&~J)8-UCfa?Dm=s+-mjK!$qF3Pu?GC9#Ju;qDvDC(>fry zpYVN~|A{!*^nxz!Mk&jH$`=9+uNmYiiXS-wJa7uni%+?#%3yCZU`||9Ga4UuVXWHg zsS{eP``SMFL7r+%f*6V1Q%YQTMf3cnW7O@ib(}u4C7+k$=p(_A5Sw&)GqBc@JvSs@ zhU`c|#^xdxPt{je!&4Jv5+ldJ>s0ib53X=#p+M)j^GMgQo^HyC zOo@;g1}Y!8#r&7w$?)L4?MiT4qU)umi8veTMb?r8kXs!vLsntosWK3FC%$n zx%SuSC8$t*%#_Y+JyQ3)Ua;04-c7P$pgKMH_GBGFz8)OR9??Gua~T4T$Barvgq_oi)u~$4>_R z4Wp+o4a-ZQr8Jx`oo9l%o4tq*cV=k$yfCx+@2=>%QLxTOk8nb6yc*n#Jz-3Cym(9a%0|y&hA`bE~q&ogJBBx06&J63Axwnf_kzhRm2Gs-5ShEw~ZJr7{1hlslP&`^YrP{(*@Bt+8~T%DG+$_u07-~sE?8) 
zc@d`7Okz*?C4`~L5KT#(J&1AsGbLNba~HK`0saCy&Q5WWX??BClu2gZg056oM)se3 z9&$}OFV+AZ(a`!|Zt4qEBxpG1)8RC|b+m^9n3CH_CI-a0OdH*DiI zKtYgDgasB5k&-T1N~9Y^L_nIQk!}!BL~>oaOF+7$yJKmjySuyVJotOx_ngm}|6pf! zW@nyx;=Zr@`kD+6Z{}o%09yEd^<&6{-vy_tRs7zA7+?YzmgA13GpMeQ^=?1lM_I?Djj_DyqX;_< zq-%WsZ)sAmd(ZxB!wI_CS0NR+itxF9&qE%bigp``nZZr2$0-GwwFd`m#Pu>{>iB60 z|KzB+%Ft0mg+LtyPdorUyp)RXMoZVpKLrk??Q;eaD{B8^VRC0Sqorwych!Zp_11c1 zYi8`L6LQ9F@hrCfSpjmgFTQi$@v1CddfdXqvLtZ~{@<8)XO<4tbcb~{gtax9&i%w0 zSqVO~()jw}pUnAoM2*o@*Dk z|G9(Wl8)Bw-g4@*A(BYnxBT~OA42XvK#?N~p8M|V{?oHt z#ifLOim?9edKXEJ2fh~@jsR#9BK#cfe;>oPO=8681^R35ps+E%Hn2Tnzy9x}I}Qdv zzDY~Y>IC1O_kXrotLLmg3I6Ag`n@~i|7U9lCBo>6c&Ldhzy9TNii`54iJ-(R%;H42 z>EY+PY+}ctCBKu^p>0>8Z}9()c7FlNQ0;^aL&#)-k6O%(G%}FwBLOvGz%~Q$Ru_Mv z+r+y000IZ{;l0so5hVlwrVev%>AX-K;AHA7rr>A=_B^&OTi5?s)TjpsdlwR|DPMn0q?8c{uCh449++rz; ze1n{F98jF``*m*;Zx92$ojpC~#yQ;B-{13^E=B=?9J^e=dj=@Zk0#0i&Zl>2;eEz{ zhWbe=y(>=nJwF{Ys&Nira1aHOoKKLOrjvalFBtaSPWbuFI}qj2GREv*Dv3r3?r#_v z)Rn{IHYe*Y*FeW4+r(<8X^C4pK{XWCZ_KEdC^K@T%1H7ET`qg!Pk~d8z-ZpaL$1?* zbxEK?8*BZ}cV91hal{_cO)-cmb*FbLpA*wJ4ABjI!IA0*>@HGMUk zi#SBsrfV3hef;?GnCRfmmioJ>`1phA3rmoTkPLBZ&9QxO$a3b?^N{p)PUuTa!*{n& z7?r%}RYEo8rwN(p6qfoj>eF*PjCIGI0OPh(>lN?m>H-C}Re;WLvCq&8NQT>EM8=h$ zG?nc4enbw9jI32gBqSsx&e(27`K1}uTx^pDUhZ=am^=^KhbV!P;yoI4* z89i;!>DjZD=rk#MIfc&CAu0vcFm`Y6$7I&MS7lmmLqFjXf%&=FXH7eaHCK5X&y>46 zI@~vdY42SZ-(2ntUF^7x?i`>1e9q&fOY>xUaCrEY6pk1slAP-eGm6_nBQj<^bO|Qh zmOKn>Ma~HiJKNh+!#eg(+wkG!}RlH9ISjS_0OMeiI#K({3x@>u#>#q_6WGd7)vAWw&8AD@I)m6M_M%sh{%v z;`gQxY#Q|+ztkWz{gH+nT8I{annslAlAhKgks zL)J;xE$TMBeaM}@Y)0)dG0CCVoabnkjnCO(lk*oV2x+&Ds$1STwqzOP*IXXghfrd( zsre~HvGQ;fP1tj5EL*{@FAootPat_QkOf9Mw zKFN&;kh7o;fHO`UpCvNu6%6#GY*p_s{u{-dq(I(HTHdpu!zy0fdrs zR=dmU`L%QB>L4&*VsT|E7VL4+oFd{A25tsRg3Gz%)%mk2y9KiQ=aC2sK^_9r{OtUM z*9PZ6fZzF%*_LMtBU<7x@V3+Zy7TSDOWoHmlsgH#Q|89w*fo+c)So5U)<;sd>b@C> z-+7{ht3bipNHa7sUt&##&BhAza#FpIT?uK0^d1Zi@lD_CZcSKLIGUX;13+dvopd3i z4;7_2y-d%V;xlDGvDg?=Zye%;@FBU61o>0!jcQ462wjXF(i$1fBOrmK>$CQ4n!%WJx9~Q5~Q$&0=$Qcb6@A0#0iKZ 
zb2O`@6$z$T&j|<&jU2TrvJeZWJvbb#5+Uar=FcorWN+2m^WFW@OiC}ixfO}sor z$M-6)n^^e%)H&@|PdJ;8FW>OTr8ELV7nefOVt3fNZ|Ibyy}oj3t}iM6J#e2ybxS~|pr%y4V&VPEMS?9cpiw_M90vZFYD-bV416TH*iHU#m=g~jM=Vq2}n|-vj za&d)*TUp0CuHxb}dXjivUj^S{%sEpEFs072F=nr+^lC!Q#^gzno zG2v+e7=fLp3%kCb(S}`WqyayD*2kTN?z?ro%qBDGI-tYrW&%Q4%h5m9xxFj2UW}@+ zd?R;DA%aeZ(!5V48p;I1u(}y-brrsg6vwj9Q89T10q6 z#G8wh6HwF`5s`rJ=e6>M7Zjrs5*#jJIPWca^_ktnp{8d_Bdg6=8K7Suk zMk|a>dwi`Z7)FEZi95+|J>+v%PL7k^4IOh`e}O14h@N36x8l{z%7R|b_`aM`*7T9d zVZL|5@CxU__xKWVkM{^RpC)<~JnR!5@FYgC0UNLOV8$TVcX$67M7wLpS z4a^}>3)pPmR*2yiWfBn!Vej*~srUF(A`JYJ7wvdU(8BQ=AqP&?fF=_H^?dHt1mNoJ z5O&$`D=@GG`5ghwCa1C$oLgQiD{n?7=G5yq&s*oCVm0o37E8g;=l+5BB%mdvsWD<; ztq~%T5*!xxVSp1vm0b8iE~9YsDD9oJf!H!n4=Z*cN~CSt@}HaBES-Mf9V3ik@H1?`u3K$ z71XiR^#-y5ujL(F9mE##phX#K2lwr*;P4K#inSH-ULH^3vc>iXZG1l3gdLBbabZ$B z2E3w%K38F{z3!rnic8MIT>3Sw_bY~mk4%Ke`ipTFjr{E!p%bws#u+mREP3box`;eA zZ{{-Y4I4&$4fZrK&rgi{Z3Q7kc`^GW1FElOX8}z1c$GSthEEg6>20b5{gvKP49Wte}*? z+hTO1(oE zMzbBMC`?KR*6Is6R5PVR$;{Orx!a#ah1Z~y2Y2Gm9nJeJk}muq+@{hW_jcdeQ^Sja zEKq1g!_4GWN!Q&X_cX6h+-|2?B#)N^qxvn+dCX=meJCaH8(WIpE!z4Di973PK954z5sr`GcsmTMSb0#y0cxbW)DmKjP!rU`HLb#UZN)m~F1j~0p3u<}r6U5?i4 zNUMwN(3rStLtaf}5p%o{>-FwW=Y{l$;So-*;o>eBym7=* z9Om)K|2gK+1W+T_SnPKx&gm3a)XE`WdD3ZEEzyS+o@m_wU&0jAY?8-KT`X;C>ZQ&d znea}+ya2S6cFgfRP`!5#AzzzJ0kQe^=g+FMzWz{gvFit}*W;+}MuXKkV)eLfOD+XC zqh3w0VBk2G&EuaX1g^Oswi z?6Zk5nQ`@8>uM|%RI8Sf5)D><%P>obbJyj?K1~nrxo}$J5fsc!E!bM;W678-Jn+6? 
zz8)g;;Mp+Z_;Pn=-Z@+Tjq7_`k%=M4gqwZs={1#pv!VW<+;6O`9pph|Y7xC>$p#Sv zE4MIO2pedvJxwoTsa!5bPh^rf6k;D{sD z-y`MGo=s=-^iR!H+@@R`Go4e?a=sj8M)nYwIv;2I7LbdGr=E}=j-`w=lbBH*DWeso zF=a268X}4Q)^g-RDuW~DNIe?=_*k4B*w(q#=&ROc`B%R6wmdBl372`jxM+ZWkN#%~ z6LT4SboH){hVwxxkH`5CLsphToYwd>N#3XiwJ!j;cuZn8jZZZ8`A&N12%V*6C8eC> zk}7#(@|?vGVz7w%Q-0JE__7^q3R3oXR|ESqHV^h~IZ16~D{M`L={EKZ3y~UK%mvz~x;hp~U z+BT27s`N=h<&%N=;6u-l$$lyN3iRrMDJ`Yc7U3qAil32C<$x9M_3VJO#CSE%E_UqV zI!2+_Ew$x5P-e63PzBZHdf7=y`s8T&(V z21eTKZbyZ3T*`9tk)m>@>uD1kS(&D)Wi}gWCE@MGe~pj&e~L(B)t&!FkfrH0%{rk` z)&DYUY_}{wD@ zhQJptZpIJi7|=tFx?R@0!z=`3l`W4H)l^apKdtaEN)P0DjSMZsJ>W4`jgP?j`8@mXh!nH~oDV&6r$4#tqA_R3(@pTtS2>e1jCEn}o;bFQG z`;Ldi?7xxSiuL%@1HVXUgvq<6pz2I-C{dh+ELd(h{J`qlN{vVr_w;=)A2vxmHP7^O zke#QUQj%F#vbyA3WSl78p{m!@mqyPu1I3C}A@H8~h;~+F+2BD)s7%^;2!CdTiE3p` z%mm-`xPBX3p7+up6nk38swste{FG?Bx~BMm?mnqGy^q1 zvzJ=4kEcX2{6aD|Dovuk*-@aZ&6y|>!E~5|dfvJ)O&m>bRdy^+5N#AhC!c@!_vl@R zI07<}?x@|w$%AB6tNfFk42%i`KJlaWs(l@%MnSn!2sG@IVKQ}@*=p37HfuZ0==D&~ z+-F*=Y2_~o_8%A}mvu|AXxeR{58`g$on1J@AA#BGl@a`W4#q^H=S$737=rGLO`TA# zHRsfP5MknFTKW`;@`{q4^aX>6DBjcHj3jH%wv1pnx zWWqq2o*zCygi(I<(MqnwDo6cjEwjFT_`gpo8VYF_LWWVd8gdje}!?c_aMR`Gpo_M`nj)nl6pjoHGeD6M+o6@X~x2?(Y*RV?8g=c6|`af z=TCv6EH%S>y5oNb%v_@utWn2{CGz_bZZBlzro&c5+i}#`=Kg3lwAHEwRy3ny$~QOp z+UH%&sI-!Wy-@f?k%441#6X0p~KrLXTMy_YD=G)(G3W0~b+~cQs^pHz4H)~p`?!0T~z$}Iw61jjObvXl;*o3KRy1&y64mZbS@PKcd>_A6PkBMX^T`jw#+@)pA4t)FB{F4!oH_`;D zR-Bv}`c|&2FpJIR;ZnmUR#8R%eipLZ>qkBD#`94ed=c;h+5V=byC7xGf>$hKXf{GL zu4?!@{!~9n^0NwhF@9R|QZ(f-wJjTX)#O_Y532_DI}ab+WnDdg*)D_ud>popzwlua zzQz_15P&aB6=BQ~G9@G?&bT(U+9L`ss}o|<^0bO?uMMWD3xqxr5 zy<1Dx5-E2c)@eTmimjEszes2NUzW-Jt3GzuN3m4ipC<>0l8z6@?plTR{E%zZy*T&A zm;JQBWi7BdJLO~ksjwxdoV+*#9rI&7B^tP>!Zv2t$8ydIW)>zS+=j_7Iw`u`SAcS= zOi8?~tZaC28_|+{b$(3ig^}#b#?BPV_EB4VWPF$eyOih|&U1)?q2XhSH|yPHS_)Xd zccg!z|Ce(+rF24&*w=pK>%STmt4G`%SHls0pS>1bc6aTbSByh!y?izptc-;SjsK#L zQH1R|F_H)F@msFIS+See?8X{cQ$IXOWs2oq)^`*Hs%=g=GZTyPf)pDd4ifv(64qzX zNl5B#p&0OqutDF|!1*L@QnyL`ov9@|o^T-}Hy1Y!#hX_GR3eki@1^Ai{_pv{tlAH9 
z8q|m`s`+_fb83acVLc#*+JH&nrImy(*=!Hv-}FQW2j`YyW6q8P4fX*_c3CQ>px}sz z>pj{X#j0|R_oi8aI!_)w+`6BZc8;evXBD-A?gfZgY_}Y^)k)_>!e=;O*<59r8pf=7USdLXPsJ4s z4A4jD_*NB3eIEXG^LYZfh<&>bO8t-k(1nZg- zC&|hCmE^*bZf@psTk@WJ7R31>b8K^HLZep+i8Wsw0eFb4+wn(XJnnI2=7H3Iht(@0 zefD1crQS+^f&beK)c%8Q_7qL+m(U2BBJ+Ry=-{HUXa8>&2S)z()xQRJ`xy$o?cQUg zVvxnZzU|KFY$YC)5d;m9Yz}O5`lnclcK^Ns#T8@sFNMSpQmrT`s8=K@F*PSEeN!o-E*scygF>jD|J|JNO?5S?%ECzSkOCjVUx zbcN0U(LkaDjwR8tLg*qeJ?jsipw<6(P9Ev#JO)+{JShKdQtJhRt?NJ35^NgZldZK~ zNXOT-UWk(Q-hbH#@T;BPBJKWVwf>#4>HkAu0;|lTB_WlvNRN4t`Z;zy@p(xU_T|7V z`Rv;c;a0)O$*NvI?&#jbxYe({&_(3T214*O{=XYGi|5Hz{|jp!L3=`?qKDv;3pH@o zrX+@g<{q8qr-%~zAg4&M^p#SBpPU=}O6Nt5!1$k+2sV`U!-|WOpR`XO(n3><4+xIe zj-3oCd|GZB&apkw9k}eS*6Um9$oNqY9L=9PHP zgXDNjC0M5-wfCIwcw>@b%pbgo$*|9>zCibmN97fjruMhWQnxNyR1&fEJ=k(590p6Z zTMe3mz=@-8bsZ_8Pl}BYQ6MW6jwKa&F^PHLp_l86Tmy)2stVsWLJW0K$7zAJTw3d6SX#%4{T$P?Vn7nIy?- zx1%#J-hN#zzxG%mfe4VrPLv78I3Igi$;+RhDBKE7SCG%LJF3a^^UOxHY!AFJX}hjh z1*B}6mb{493ZIN4cw8=iEgS?7Zk30!jUwZfKok+iHVU|4}Mzb%01+#}7a6JQD| zb#MBFTyQzwv_FZjivlK?h_!T(PWXI*>`-NtmQ0Q|i1ys=_S+O8u6$!BX_D2eqOHv$ z6_Wg_GEb>9xm|iUCNDaz0kYsMr1(ruSxm8?)4Xj~cSO^*b1gky;PWX!WSav@sMn1bGV=MqmZK_jotERCd<{R(1X&7vjNhQj zm{_-2SAK*`EF4|vFK4Rdj63Mz^Cz{otzdSVkf7qnA< zJ0n=^D>UBK{UfCLJZc>+?Am#z0fIEj$~e~q$6~>G6oZU{R)^m~gU{AQJchh`|2JE~ z!eYASR3SjM&Fh z#pP{)-2DPd-ll3Q&1C$30zy$bw*kybNI1i2Ok#%fwfp`iH8Bma%7qq)LOUP~WN%(k z35X00UW>JSfB0AiEu)xf7C6HJ%sp@fEKJ60R5W!K$V?GOPSvIWVz-=-QE0o5KZUa8}uu0Y?5KVGZ(iv zH(!%OfN8qLlnsH{+DMsk*XkaEt!%<=qxKjj@8)u>2;dk82M{t|oDconr8^GhU z8l%AcAF=TtT~&ZxUw8Hsb;7OPA%t4xTE<<_0l zj~t+u?ss0%_;T>radGj`w>i9{Yg`Pv+T41%XncNmHfTYY^?@B5B*lrp$9B@ZY%-;74@G_>W~Sa* zrodxp!VSyc#6Fz)Ku z^M^LOxB}1Fp>1z%Mxo8becWl2Pc)5wH<^=8O z`y-3Te7m-yziemab)|YQ&w*#GMb&y-5i{&4j00ot6+>sD%XwK$8m8yp>1mgBdLugj z{h`a=DT}h?wg`jPg}s}ay>XD4FD$A$*-#%8NJkk&YI1zuwHhdVbGo}lq`BjB_6!W2rmu1yAaFj9SHoe>y8ZTqc6~6+L+zp(Nj) zmr3@=@F|ZSDh#-sGKJGB@~hzAI^#PfydRZrv%nHK;=b!?jSvCZl_ucuppS>DUArs% zNMyKS6B%@$>h4x>-a{%Si9wjC=1bp5hn*z3)Kk_PiHDx}PcSEnJnme=_%$LtnQ1Cs 
zDpPP;{q9}4SqbpSw#=}t((u9)!gJCJdg-^~tcW-MeXDHJy;}sA|Bd;2*Sn0XO^)81 z0S*s3#!Zjq=#@y7&*3lEJS=N#O%A+4cE8qf=S|pqu?Q5%&hH}z$Zxt?*gzk9vI>-R z?hftGwB30ed(w+`s;_G2=Y^PZyTLKcl#6{R3S)C~ftyL0*Op@dFW;e6%Y93{`cGbvMhX}XOt zp|xa~IbTvq3?vqqn>v~4>h4w|IBOc)sX1-*aQLVqu(t@uoxX{h*qTEZ!fU3z-gVN{ z>@M=SUNO?{GBGo+o!{T6y>_{_^tf3iE1NnwqYYSgXIt^3rQlmynL*Z1>Ab@_diAj%NXC!M?oOzGR4>WpU-G8rTzq%;n9r(^e$zn>QYFk+yGQ z1)S%@Mzn557NwMib2KYVI$JmLYR?8kdl4vLO8%7%!vo9J|gPe`P#+$4QfUSg8GtxnG(Zm{{UqT(!GEvkY;jZZx7ZQ zW}kCd=*ba~{>M}-`6@E%ohn$hpoC}6DnlXB9mmJEq0yP)Ba`cH{R2P2`-MDP?5H0Jk^jxeT=={;d1KcB9TUTsi&P(Ko#UG**hr?*a^nqtM4-+pJ z^6M_w57aGdt883b#_i3ZI4e0yy&au^#bR{hG&RJn?XveQ(Swcl2@bw6fpYMYlRvk? zT%)s@3QKl&zCG_#Yj-pC4V8nH<25B;)@?vLNt|{A9ZJ=T-Hpqhr|WiA>MzF`b&J}K z$hy-b+C=wND_H+M6D-n=gMAn2WGeDNeBlg}4jT{=*^(o88Wa0N%Sb>5l@)-{lGc^@t=3|UVyip5&Ey>4dHsJN0ZEHZVC4_n(@p5pp zMy=p&8f~x)?nfhfCw|J`>;@Cev-ld`38HIbA3AZckQd;!$^UGL)&v#yQRZ#-t64K(b?} zq^7P_+cKZc#O9-4nmA6n>~ts_6heD@I@;UXuz!rp_yorAZVhfvxpH#79=#8S2o&ez z=21`q5y5;_DG_*id|bM6b3GoXU{tr)y0J7-dbp|f$Gs^=@+H3;2;paE8^$|Cq-AH1 zSn8DTo2nkXo&L;s=s2E4ulJl>D>0?k6KmB7B3HzoY$JLMP4&5u`+h) z`r?c3vwL&>C{yNSvEJK;7|`w;KL||5ZaIk~{h=r!+U3A^Z~bSV*u9G%5vQ-k2jAjQ zY;fG)!}%>ycXS?-&bOL3rID-HuE8{e z-=^7EOqsDWB)oiL%lU9_rb1);=3?NfOe9mn!J1Fx)#fYdvBISQZi{KWMS?I;c4d`4**_sTj(CiFW(%uhSORw%oG}I&?Kys z?qpoF{lMcgi5Gof?({*eLXLoXfK{IF3~^!v#!}IH*KgTR2$Kij z6GSh-q-?=ya5oo+2PbOUC^Hg9U-eb-m8yU={COJb z4s6eB&Evc?H1K-gPu-%DkAovSDT#TnUq5=~4A6@swJ%mGjI?3amx~~nCE4d5IiB$DJ!L>?;XYx~_DL%D!8u@l|J><310-v`l0scpsDlT02eOLg`r?VO~x`{!(o<6`-Q3>k=l%)OEv zDnaveOoR!^ak92r@KvUJ6KA)6rD2su-oP4XD2F?JYM5dj%UcW+Gs`6&Ednt_!Nd}# z_c(p(aehpeV)MQ0uO^T=$(nidUJHRGn}WswdX`H_eFOiGkvQr=D9C~)&)F2O0NZ& z=7@%cfp!cY0E@&+f&_i?CpiXHdvt2Y^R5=X*6#? 
z%DDDoL4VhfrzPxYC^2V>XM!rN_}#`Q{Hf=KACKkkATV{q9I%6EP9)kbHTanAo)>>l zVtB@=+a*ipT(nkxfj7jLpUmUQbS~%3Bx_~E8cvP-6+;RdMHNN!Fm55nL~+@AH;ht` z;;WjZbjuROf}G_O3F$8Vr%FS_cXs+l!*nQk$Qm@f!|vFcc{$qlw+(m1n zt8gR!HY|l?ZH+Q{$9MiLwfUshL^Vx{S^_l^mj&VPr9{AR4(_FA()8M9=~?mX65!*g z8_o+0<2_}I&Wld|PKBoAZ`Y02ylBwUfTrRVmCKZAl(<7K@OppG=URrv(rK2N0s6uM zH~C@0Rww=>^kX_sPwjs5v}0}cr-9sZyEY9dBeM$pnp;lh~)8Eo=+ zsRTF0KG&t2XEU5R>e6V6R+X}H1`+fy0UstcZWs=T0{%s}e-PSURZ14P10u^c;AmWyV+8k$ZuHgo${T4tKGtZBq_UOj;X^IWpym zEyeAc){Lr$D>IuHTPb6fmix}N2Q}-}EjW&{x6^%Uh)8m?D+d{ZI(eot3$L*)aa+36 zZYRf>n=^DlYGyu1Cwo$ZqGA(ityV$r;jEvKq+WAjuHz!NpVyKhe&w-|$9TSCL zZ~&F%8-k_d&oOVrI3~Q4wOXyBqKRhJCP%$p(vTQvx#9NGhP8wNzwLIy6*+&aTW3xV z6PV|rAO=SiNS?g3O&JgO8Ibt4XrUT1eur6NPs;gfTq9k4UceidK%jP+DO*+(UvoOH z0lq%Mn3fhSXgKa4c|9xn`1c9fhccI>I-?{GuD|6;h^N0rLRYcA?tJB{pLN=saxHM> zx2=CLlRZ}ZT5Y(2;D*oYWK)|A$48(f3a_Wuq~_*eC*`1P&g$>?^0Def^6lf(=jjBu zkysy6%F(}F30L=sGz2>aO+KZqv+DXJvN!H8htvPbVTdBL593l?(cmYRYHZ>uTf z`~Y*z-fA5r#Zr?m_oP|LC&7bkzWIm33~d?LL-|dPjhktlkCAoGLmxzfOC;l1PyLm!T7m>V4=Q%UWPJ6ggsaPSwkDy+I`x ztKw2vNrj-ce8u7$_qpwnSdWxuC9FRo->UP8ZY<#%nTVy0AFK`G4C z8ZhaapM#I~p7VDspV)8gwMr?BWJ2@2K+YjCMh5)xQZCfG1DbOJ6@O3LULH%fnYnMb zyPKweui9Thj+T{T#>zw?8bh_0)ZwPUF~p}9&c`9=#r9MDFl+0)=T)tG*_9E08UYVl z&%!MC2cInU1;wGwWm`NiSHns|a}zbKv1R*5iYebC%mX5lTpe({2?@EA~qD7S%EH{VSmv$24!h$C@t;y z>B2bQ-UiT9(P3f;uAv8Juh4uZ5=XeFhap)L1;;;E8jqBt@I;hF9kUjXl$lR;np|(z z1@;KEPo^Qs+Pk6OKt@^abOLGTczfThKGZ{^X^wtNjtIb<*Q4*3=9ygr<_&GUy3LIe ztwfV4Xp+lk)KN{IZ5RC-HXm4sV`3wgrfN&J_N!WZEA@R%K7qd8O+He6Qf=CwwWC9- zyY2YNw>T%m;4DbOhxUc=5X|Hg>$3)k1FV=S_Gjxkd9Og?S$$LUQX_O_H8}QW-Q&KB z7TINQw(q8G2#tv3yp&tj{+xg1%@ohIpN#jiQ90pf)9Jn^8{uojeG@wor(ArVMu;9h z+Yc7*m3*|_T-9ZQRAT$QPIc;b{$E`D?dqj~?6zl+lZrgv?F2p4@26Nt>UMz=c#`Fl~;<%Gr4M!TqzV{tKK%YFd$Uoq2@Jby4G5@8s(fb8QPt z+0&Rp78#bRjrkG6vao#hgPpqH4mwvoOb zCARs)l`^B_>NgyCZFc_;(QPTBTxH%^ix))1^SPCVL`${6GNz*!3R_X~HO6i8eHcRY zKs5MgNvCP8lgB~nkzCei(WXz*S@6#DR`2*Ej~fFup_{IufUFUwY`Hx?yYAnF*^{G# z(*z9U2`R6ZBT63U^Vj%NmRgu>9n`4>O;MF(QdT-EyYc5NFJ45lj275yrVLQ4M*KKd 
zwuxffw0r%=EloUwyWt#rUB0RfH2Nlm9PPThJ%Rz+x8`|cdEiki{yV+2+ zN^d8Wk)o05rh7(n=UMRqf^Ii?Sm8RIm$a6>bR?$DR9njGa&lL@L~_W%`i_wV)3MU+ zY+B15E(;p`A6a6F0n7>;r9;UmqREQxAh`C`UXL+cuG~T*qf*0)*OB>x{e$M-UJa39 zWLW9vnaN_4rIotMQi~^nh1!xt}k<2*~b^F<$BuPu;|x_ZbC{I!eMc_9@tm599OxEV`Y=K7Lz z8a}X{neH%iYhLm-rD5CtD;GyrcCoh9@R}gSdEiWsWnO^Gd$M-oKHgMWn_bUlpiMKQ zp&VT!WC~c1X`$o28Sk&XU&J{SchQqJ6p+o6;QK@ZL(yQWm`=iXfGNY@goF_F)JF+s zMW%y@PE!+tuxfxioiUO}t-?@XiH^@8G_`&;A_KDPF3_5-3oYUa5(3oV;fo34JY zMl8>PTvTH+-;YmeNzGzqBfcnzo@Y~wA5uTbAmctZjsV?vv`PX5Tk3}pC>$l5k)LmF zZnl*W>e?@r&Z1o)N+8-axUsRZ$;zcRrLxmUWjPvzKWAmUv|i}M&y3-jX5zr4-N{ZY z)7&4=R+MH2opVaymJ4NCpj`T-WbYa(;=Ixj_qJ~+S^X-4_{ohSPxoo ze(s~%(^kUzBy^=;pfpYmL<#WGf!SIrh)O5xZT$9A@_x)T#5uLG z*Sa*iVk@W%T2S;9)5T(_e167gOh}9iD8QR>+L)=#J8J75u&>Fi=vz6kg|66D#>A2` zbv8my*46MZeLRyK5?N;LV9%71s3j0bofubt(P#DPdAty>0l3k#bY8(yN1A2nnR#kI zxrF3rd8x9e%m@mnC}?ZPpW?#EmBY(+?*+%jnW(u=?N$B2c{)P+q3Ax$GsrV29NV0a zWZSxXck61awqr9k@@s|x?oIMyB~$C)ZN5!5UehgyAOY2?#P5pwCJqQc{C<;p7#Rz% zwWp|h<;Wvs#940HTC6s4;i{wdU>S~Z?i6_pnZ&6DN7Z*ftp+qcE4n=^C3HJ61m3(u z6@4lc<@V+zw3$0mS07mU+Yg^HNAeK{m{xxL@B#W$eEMQmE|KMLnF6J7vE6A~W8+=O z9Iyfy0a23QPC+JW_Vz=GUC{#Yb9%MtDeR`Gix%3av zqPI}O6lO$3j4@%L3%={dx5}SE?SD};hV6>s+<|TL8-An2&~oZh{!tf%5_5ae zX+hyUyc@4I20!0ai(EMt6Q54+o^T1aK+c?!^D@fs)Zs>e75~%cqR598gGnCfZ zLjpE*5}slbZgQ}L990&b%ljsW!>Sv19WPM?bcVkHkEX5PYRRZdf%4j#ta<=rPAg-2a6FZsD)>u1(}(Ov*=P0n5_*u##`gc_YWRie0if7geG)IN3KYKCWP@oB!tmO zU;pFg1Ovv4-*vZD_wp(T9jzzwrq=N+1Vsk&NXE|_TAE3x0VRf0__ zDk3$OAakzau$9B;{Vc8}sq?Y|PkfyiQOMzEJXH;>EJ`P)oMw$lZ&8;Ys|^-~7K z-rw*Rp7K4zz4p~tD>Wz8ZMLn}g(OEgwQ)Y){D2L#D)=V{)fTH3FMDmbP477zdjC^V zeO_NT!dU+ItBlYf7LosR z@EzFw`u{kFxBmd2-2G1!b$h~DT{Os-1<+;=UU*|K|2x3wwREP*x;#1Kol11^uMOPT zz^g$RgJhc>h1sgD-d`77ukNh zlSdZmo%%fqRw0O@bisF?N{iWRr!fA0`7EZ%tF1tjB2g^Nwfu5csC>hCJ9^_clDQS-NyutElB0%7n6K}-_&&wt5@#;Lw({k|-zAEoE2*s0_@v=`ysHqzS+WSfWD8xrN$8AKsw1~ z#Hrx4y;>P7CIfD4G%7GAIBmQo)Z=tK{GQzg@}P>o0j5nliG9Q19x0ovUlyJ$KC<#8 zmzx1}$GsXMWQ0+dkTYATnfMapX`y_ku<_MT1J;kKs7pW_0;o$Ozs<b4ei?ap5h-vfu%V#fmZ`XG 
zHJj^RQF;CJu_z?QkT`y1Y^>_%PaD~9WhMJnR&K1L4>TEYNWT(4BHUCFE5>l-p1$G^ z@&v`5P+V4y+$Zs=A!V2Qr$`7Ck^)1xYu3D0&CTY33~u2-TEk&Y%jIbM69VFp?45cN z+~U;jU456_UQNMpwyU;t`^FO;4WbZ3Z8P)P{bq)SY<4QzVQ#vZ371H0M+68gIKQR9 zB>Hr*qknt1PGTSZH)qoO;L2;i*$=Sy+~)5p)9e}*II4ot-!|^*XwE-S@)}5N#O0^4DOXGnXsB>h07@1r9Rk^rGG1XZ;`)@6@x&zWMYxeHyXfxM#jGpgTlv2XClxZuJik zx$U_n(7h;i7i_kk9QaG1|J!o(>I40WNXyLvGn_j=FK@wT!w0dEVGjqESRznHtF_?t zo~Wus55X5ci~h}Q#=3IWFdvcY(nXGn>Qc^6EiGI#2 zdrljf(hMA!D~0Db+d2{Do1sB~X<%Q!1XW7(`3xfyv9Yy9d-aNkhiA?erj+Tm)Z^vr zYR!b-ud(m%-7e|w?ScvaVk$wHnJc>Pmx|o=z{&;vy5@M`iMOS4WpCb2`;Na{ zK)4GwxW2sKrE9^sK#GjStXVi4J@DD!*HVVs%sG!n*64aK<&PE&boX!Zxb6ml=&p@K ziUlEhKOl<74szL*IQSEa<8>#?qD4`hL8amB!{>O?7O;^$9qc1yA3Pdv+BV3b%YWKC$ zg`3$8Ss-Axm0UbP^&}Jn)E6X_=gl|ACHg+6!Gr3hs81hzARVi!jv|W^@;hs4Th+(C zc-C~Z&e+6$#UyypKA+;_>p1DdD14g-b-f3-@6wT{5cvUEU(D;*uWldQeAfV>Q&0?X z7D8?34ON;m`lM@!pP9!q>InR*M~@Vc5LSD(;2%FBXJHw4vOFRNkP^6DrEziMx}a=pxZ+?j;df; z7iS|{$9};kK-TP@ZZRn1ehbzDQVu1V4NK0Lp7Whi?`BzS2210~ z$t5~VSy`Fy+2uWt;Z2^`!DUw4muK_78xIEBD^+bMWa8$bSj(kf=*;0OX|yVYnyYM zn`}_Zs;}pFqs(9rN2B_K_BUgl^u5wv*`j2qN(h?kflueMa>tN+rr+MF-~Cy@SA5-H z?vwjuWMq3^o?>x1`yA(4`ev~(mavHL=0?wZt&J0y^`{CrEXV8E*XgLOz(j5>z%-vq zOu=apN*M}LP<{0&U%lcYeSQGfxZ5i!d+9EU_ZJ;suhDn|TCgr9icP7{38r4VR>-sm z5iWwAE+P;mBgis@kVxuh)ly}6=P988pJ=*;D^>Jcd`)bcD>dd{*xJU~bn>Kzp-xZZ z87h5@ekd7yr@m0KSrHLIvr)-qP#?!Dm9T4n9%K^|a(#m4+uxCKHD~~dkvi%Otzvrv zrr0PNN6&u8M~f{|L=PJN{bdxt`x3}WpLMgF&!bAy-N0wjAtdNV|H%>9A25!bc|NWo>1NMxE7SSF6T2qq`Vo9L0@+ z4<0C^dwLka_CrBdcAeK3@R!M>&vQWNrY(%KzvX^q$wKfrPQY)T1?S@qcfR;jKu%Hz z#nQ591>|%^)9e? 
zy+3f@%8o&i?j#5q7@ryAQJH{X+AwgC9fU`4;0`eV_s546{M2NAUKeO;sx5wV31B#M z-%m%#y4r4d_+A_;|F&>m?H&%#iBC#$5VY4>Z-aHy=f~0>%rxQB2sT=na1sNloVSXK zqlcuHf~||SHLTHzplsononODNq`H;0wVvNfzBKtgqIjGUY^7`j82Iw@Or+(U5UV|- z0I37(JwpvNm>z>NDdSC)>7M?H0}*{EBE4t(b~aYMBrz@KN@$zW#RF z_Fk$Rq1MqN9Q1a1IA4(CxjdhlAWp>>@h^_VZL>W@cdwG;+?CV~e(5Xb_Pr(~!7!JZ zNY8VxFhDQ#+|~Fa-^$cFS{XMc_?MHg?-^1TCYW|ILzLH7%i9~y;{}hWvV3n#M0TfD zD?DH!2fiC9;GUEp?8cS|_?%}k6jdWh*hBB!V8SnD2l zgIM?F1;tpxhZ_{$2O)iWKEgMfb3WH^^n6c)<=!@XAOEpz@-hdxwyt|uY8sz>Pk*)J z_+8x^2h#4%HC#_(%@%g|-rbI~G~LXuCFu&;&E|m>aiZ_V{u!ALX2(>m;oPslr3N>G znkvq>9vhKhUApceEX3@yGtvK_-w?p*B}QUvH|%AiIs4+1f zX^t__BF(|=1v`@;Xuu5&-&!2Kc;rM?=wFp}Vv65;G74r?bu24~pfKb=c_N9t600kq zFiI+52IL}O9i1x$VUr)_JRPYJ3jcZ1KalWJYcg^V&I9a;rl#i6^u+xRv*5DpXs4ow zJja!!!jqrZ)mBl5nuB4zf~kDr7u0?s-7uKO1~${^Vhn73vX0xKOS!kc_XEp&Ub1cq zS^Q3Zdw1L0y<4MZz^G|&YfB_zM{Q(bestDb>?1gMeH(D?d$v=Uc{vFLxI*Mu`4AOG zLismLgw3WqA?||vcXEIV$WwDaMD+wL=7G0z*n$^;q`-nFCW?EyeKYZxTLz& zbY=Z?49X1a9oSAwNNDnaNp-N&$~d~Isj4cY=hy-8r3JL?f+?u~hUzm8B2w z3vxZqXH-zz>qKBlR?h7V5ByM3Qd?yc-IWU=<1oCv;;P8O=%Z*fA1sUdEYO8>kq*2T zpPiANigC!lHGF5aF0%^BcvRbv9_t%AQjRbbPbm!zuvI&-DfzUFi=;5ttS2FYr{y&H zh|t)KxVr*J=1;3;tN?`}Y7c6Hs&iz>%OGYmhQJm|pJzfg?>!*PYznc+dW$WN)M`0O ze`TtdMv9{{UeHO}a5DeMh$D^GPR5p3=zz+Lo_QZx62e0U=+_$3l{bu`MI2U$iZ0A+ zX67iguCNiqFB+Z6`F}lR+DP4|FzLmGqe?Mg2UYIgf&#k>y+!@mt|GMtkXLkt(3%=o zr<@d>y;$rQkrIlEe~=Y90hDvn>ui;&gU$+A@|gY2@qq(%Is=eTZ=(uyaKqQ2{a= zKC-CN5VlwjJz5}kbXU6VH_F3`pS)^O$%7lCE($yttBtXe%A5+SqaPEe>pHzi#s;g2?DG%e>n?2(3*rNauAB3pLd$E}-t@%{=#P49opv zJm3IK7Y_G5fR83+>FC&-UO9LRhV0ZCVz)~cBMA1u&Z#RmAkeCqfBQALf^MVh#wn|=X0;S>RZp(!0Y*FH*wU>P6iilHt_Da?eR6TYMQJ1waACx z3!)6KNQtN0UnrEKMSLkOjk zRY;8c(3WE^HFk9_eQV2JI0-tCOU4^TIZ0=NiJtB;7U1M!0nq3d&h4>FOyt0Q=}>yv zee-M`M|EX99^>Z9iG~n<~mel zzXW{u8u~!rQS?gwEiZcZG%z9I<@FmzZ()(PsRC#N{JTi^b10DQ8nBLe^zIj3 z`urD^T(-l~blgbWr+1KumwQ3YyfuWP&Z~3$p zK6qZLm133%J@e@K`lt?XX07romfE7M7g-4fmI9{&Vc9x@S)pQlnVYGvWasK7Va^#_ z_3TzQpW7~_v>1{S$vqCpOg2MTqB&=#d+J~|MnSxpqaVf9C14d9J?&wG7P?%>-*^-v 
zVvvGQ!Z;$54S5XK-(H>Zl?&hcx=1#4m1E0e^kcsoh8DrmuLR?Tw2?x~Iz6$nc$ez0 zp?BA8qDKxn?-WLv2MtI(?=8{UpGhbAoYdP|NnjV*zDn&vNO7j~I4P^R>h%Z=I+5C2 zZYP)6Eg+T{uho4Q6%>pi(;-ss^XwRU>L;JmL}& zq_H@TF&)W5T=7x3U=x)87x>9SgVIM8cXx zuY)IEyBhlaMr|9$8oR`U9Hw$H+QMUBtd^O7n_B;@#CJjug}-q-Ng#L4P)cHs9@}N% zEZ^zW<=G)t_RD75kN#>)3z}t!EXY8&IwX3K+)F)*Hlp6H>12vu{!nrfvY(c8bH^w! z>HA5KQ`mYy^2zv`t+ntQos&6b@^a0_qPI;-1~ZK}SH)4`bN%0*+m7sd>vAl3n!$de zhn$eaUkyYEK+8(`&tuzjjriZqV~$fLTXN#0Qhb*{7_29%oD`Au}xohs+%-(oU! zd)&P&bvIx6(joo$14w;+(KwC&{Qx6^g*W8MSH zGC5IfIGMRD5t+Ujxo>hhjegtP+y4W;K+AY+j3w-L7T6OIQmyK9HhJVvbNG&s?zc6qW8MLvmT%5Y&I^? zJsa!YKFwmf?#S>20p0CwyR&>EexQRbj($C0zIX0PKA2Vhb+|ou(4<+hN>rUc%|14L zE^PN>n9TlWA&+AIKyGm6xV_^g;{sjaqY~e?utzHwkGv}{y_-MK%Oob;n~j~&XS0A7 zJdE^bgUSmu)wEcAIy*bPMj^ z-yfF2h>YMo`*rIRf1CZy$Nm7pveX(@f6r!y>t1QZf|u>zbOfLtix_Sm{udOJKw(QP zkpJuRSgR9D`ozSnGVdD^hUxoT_}e4*gn|$#$w8g0N?sU;6n<21mebv;q(&dP;7>ne zU33;wTgsUfgsE5T2v~^8EzN|V-J0GOq%(GB&P(FPI-#hH;tAbVL@#uwxhUTkA}C$TU9+1_DXs}}uAqD07>PJ_qX+`~yWqKP}?3DUXP-5e6UjjkY zaOcdgWn88>;q&g)W#(#VKWiI&Lc{*XSzk)sf8|#mYGn12ro{d3tx$08)gS75+fj)P zA_~KsMC^hcl{4g+N zi2K_;F25o8#0xtsi>B3WkF?2n_uKF;n)9CipKr_m21(EyVa;n-pF}Dpwba-hxvgs_ zR{N+H5l?=Rxtnbi&S5eN+$kYB-~IUxSt(1PImh|FF`|%%rwT;OAq4%#E@G$Z3xZVG zQ7~qjPF_`)0^!cgaE@!0W__oe>`LaI_1z_W+l2x%`8R9w_(IzJ*BP_Me7%_X5M2Rb z-$Nd78FqhjK;8?`_1yI}H!S#Vjke4m$n0@h?AN5a+G|~xqhvPN*xU|aCdUXrq1kc! zY5!)f$$rgCZAYYsh^HjqhRzJbnD7S@9WsF83*ifIe*72z^oZ^Y2dca($L-{Tm%5^! 
zqnBJ`MV0Ax-KEBypmKbzdtWz6Vsm*_osy!(MUYL$1xJrnPL4`(bK2i-?G)xWV=k zO#a-=04%>pBALH&$4+;!`HTjG*#00sN$WV<%Zy{$-A|ml>AmhnXt*yxv!1?yggB+Vb}gZCzyoR1blf9E|LK z>u||$SRvjlcZj{!)z%5j25kNh{uiOl*@>j`>hQElLBu4&_dfQ8>~CAA-u{-J48f#0 zyr2(fQtbSVK40M(d5W3k3Om3Ts-Wd6Z;eZb9`}fxWi=a&oQlZoi~I?dTG{K*EvxUB zfj#CqZ$oR#FUT*ks!_QhClF*wE?6>K=Hrt_ro-!UARUB5$NQOvAj*6^pN z3DK4}a;(On1_g(vkNuhb79AX|Atv%K3C2GT!`Nur)6C+7C*`17NYUtj;v-Kn`hUr5 zH`TcOjTKsZrJ!{iJL2^uv01OtcJEGax=pxF=4C^gut$kRWJ-!pU_yRrh~2Av&x&h0 zjfCpN1FI3P1{-r@T{US+TDn5_^8@RJ`U3tIO7Wv`5|Pr`Uq)G!hw;h*am4(xB>b|H zis2mGA>zh-cy6%6ZT?7!RYOTvxypLOCcSzTL}T(C&)IZ%6py~#pN!-~)M~bq#Na1E z46?h-b8tX|c94i9QCq725pj}>PKoqXPin!ZWS-IYI%U=O z3$dH+^zB_l%9Dp6>j|MFgK6ngQ~F@)}}E9uP`*MKCnFLv;AP?MbOE zh*Iz-D>Nsfbt8#d;o$H>3;b@WZEIVtYS4E!8VXT#5cX*rZx7u zSMlx(c_=o!(>iHZ7eSbmSldu@@39NXL^p`MDJUyJ*`a!s=N*e4nd7&l{j*F+;HzCI3+;9cq|b$9GF;JU*WAKa36?MZSD&@G_Xp)VUbO zVTdW$?S-qr*y}n42E-+<&!Zrm>XcR-<>eoL=e;bYhiiXg=`$7;*~lrL^Cnmj4dS-V zP5Brt0!?&10|ckM+}c!h7mKx!psikcml7u_Jdi#DB~3w6ImWbyS!ff>GlO3tZ1Y1b zat%LKCvQCIccvr#WfD4$P+GLzMRM=|%ANE$8Mxl>mA~ZyDoWbu?-xI8Jz_a#+mPCb zWDMe|Ly)7BltXYeM(|Cdf4F0*$B5+&9)uJ_W@PEdnVWy`{(U|7CPIakh-_EOBX*Rb zTC$wNxcvvSr}yqVH0dAD{!@HN7KjdH<5u9{<9o%Bd~%h>QeDMaSZMm`bK5JQ>$6H* zxy9hQ{8w$&?BovSbz8UM~%%*&Fm!U24@U&~su z^|b?bb`Re9S{B-VzDnV%D+X)GS$&XWtt`+Zht<%u7q4RP?^r& zl4b9+2rT~`MqdS{zjReU_Z`ib$kDm}1$!EXa4B0Lu*3GA=S}8&7DYj>r}TY;D(Fy< zMaGd5n^S%zK_e^4^e-!+S(k*v(wm;p^W;c`j;>HXl~rN!a&>iV zYt;O&@$Z@ik4l5hUm$K06urUOpZ!duq5E+6aBk=L`1sF@la-?tI&<^2{rSo`Hcpez zqJUI!O~J$_PiFi$LL6Kt|F{sUPMmB$ZOiHUceK4WqKg>}Rb9WRKjMYaMnGK8_qoL7 zDPt|InTKVgQ1RG^vNQ1h5*|~UxL9L#M-07&NA44Lrd#efaO@_g2?f6wM4lJ@BP_e0arT^BG?LQwPE1BeC^HhE2!Pjji3S#TUE&82n zW4-2g{I5hcT%KTgn!FA-$|L?g5;Bxl@hQa7x1rqS#*>Rq1aeK|NkyaACP^uP8ub z%vK_CisWBPcyGgzJTV?bgTihOEcx*(^?4u*s^DXyY1#}!k0rp_T$xOxkVn%oFz}H_ zTi35E$Ag1ixSL&nPY)R@W!6gV>IX+1Y3sh8MKx_`LvwQ!1#@10UQzM3a-$u4n5uX2 zzwh_eP`;N~U4z@6-!j3ND7T>0DafsGlRbzxXubk`hkY~Zm}J~VGIwDk_YhH3+pTF(f&S)})cQ*cqWc>R_aZ!JNWN|@o#~u>mD?PbREN#|cOQpL&epNwP 
zL3NYb=todb3m3Dpni&erA<*}(S5nsp4rM)FPdh8{@gJpA3gXz9W$&B(;D5`y9@hFi z<2?hF3AKr{JxU;=Wj99G5%u>Ut#qa-ncUl8uGYt9++u}2cosE6L)DMF2mt*}Cg z)?KVmK_-}uTfSrGG^Jq6j0O)m+tY8nVvQn-l#iIL%6BCdoL|F|{61>(lCnV^$HT9# zv^zM#Ik>*AmacA{>)f_su%y+e(z4>8Kc+|`rQkE#8l~F8Mi)P#&YhQ+dO5V8;`ezKj%aLG4MOuObnnC=xA;wH-((1XTdqa9Ew5 z>Eb`P=sK`_8A$o#h@qPrlWAoNELEh~#DKO@ zGTJ?J(gg&+7GBk<5{ND`k1l&;K6t?A;$*A3F6r?k36cM4-FRgH1dvxFdCu2hR7fWSa*IkXDk9A z7BXD!*3q&4Y1}{^#tE!kPUH!VmGN3`y#C*eD&q$#qw)WnQMDQVF7%=8A0CVH?-Y&d z-T(Ik_%Z&UzA=<9IC63Q9~`%pC{krTu}To=4i^Z4%+kBV0KyE*i5I>37c})bM1b%S z(va@Miit+qs?F;kSF@Gqc>!{u@e`WXMUrRp`~TWTc%g0C0ZJ-K)doLt@=tex(9iyk^eB4f&BUZ{`=>dK#Vf?e$ij$a6WvrJF z{i^^*$3^cid}DcrahJIYCe$zgYX#o^HlQ%Iq|XOZ2~fg@u)!)E9j{Ey&e~DpkdFl^ zqPq%}vONBpYNQpkAyD1~0LWs3xM<*|pmH^nxy}^)> zAl>gM7wdlEdw@9T@W2h2+Gzwbp9FI(PJp0`gqVc-8!8Z%f*Cm-D?w7;-1Zx%>CC8xU z_ig=`mtTyI2*v10?d+}XUAp2x1<0p=e6j{wZ2$qnJOE)Rvo5@{`rnoMY__DLI;y51 zVEIM)-wy5L=isICO#c<$s0~~A_JAC4&Hqn44Yl>#)5s8btLs=jb@Bih97g~iA;qoW6=bq;nI#eMjxxRM z5trWP3L*kDxs#1fgM$Ox4d#Py)h5gC&alifxf&`e#A5!y?Qf?6xsT@C=jr*8%7XrS zlcTlb;)(G|@-2ynUNyvF5X5Ufv_%rVo`Ss~|5`i%s8pTli z0WZ3sXl`~YqhX`0_{)};5E7OH{iflp+Yb;9NO8Y8um%(H-rn*H&#Ar0k#i9*DV}!& z9cpr7f-odAH8nLZPQY@R69(1yyUo8o@cLf#o9z`>wY}^97rzUNs10Y`EMCK2&~}c9 zM9d_8PZ#y9W(+Dd)mveaLBzm2yPp-sMj$$Aw4l!&AyO`Ub&*xxv@SKuDFIaTOXlY{ z>v+{Xhfv!x#4? 
zhQx%$^kNH%Nq7c6?|sqaowq2lJ9qm1^$4>`X1BO8d6?h}&7=8=l*NxmkRw5THMOI4KfNZ99OjmkD2Uj_&7uFhas-N?xtutk+4G80i6c2V zCn8DN_-SZ3%8GrnT8#8PJS0;JikNJqTXCu3lF3B%}uG~bU~o<28T+@d}=!5$lUNc z-{{wn1xTf7Zee&8TO2&HglJ zxN<9j^hFe>@@0$DcaAvNv!ob5&Rl--BRsZm!*|+WC%K2&p8E@|Oo3EkuY^3pB}xYz zUOo3sw=d)&D#WNkAJue^_jQo2))qVwx@&9sE-uyR1Wd&7BD_LEaT`zCzfp-l+iyA# zc(TFlX}=Ylki~p|mM}>if)gesM6M(*?qlWnX7I>*vXq?5(MHbN$x}Nd#ho)YKAzog z{>h;5eO}oP#;}vwpbo3wu-8G$98ZJ2v6Hd1bc5k0cotq+NGrft*q8&sj!jQb#gmPa zpuTlPN#{8TEQ%YHm6g@1@f{yy;5a@;Dbv25mPvo{U>5L?3!=m`x5)8#M6)t2q&#Ah zvUnDq$umqI-WXQ)luV2cTjC?$-)^nG@L|J))&_Ll=u;{Jan2x>>&hJdPgK+`VGgZh zjZn+weZ5C+8MYf!{_=Y<`I59EZV;sIN2F-~yeKiq=-vv2hA)^pHLa|)ja9`#AQPzl zhlt4p#k|b&O-~!u$NoD~F_*?&ItfJBZy6XY{rc~K7_r&fO-9ve1>8669G)+ecBfS9 z8JW3!%#Wg!68$!hJb@I_T~8C__Br%K8L-Bu#@0KGdX93caWxyRXq!)e23i8x=-^#R zo(W3+ng89TKtzrGe1*f|;$um5*TmhamX>0Z^8paHBdH7oC2Nh10n^iUj2bL{w~Dz$A-4NA-)w8Qz0V51m zr~8f3s5iX?&Z|&bF*RHVQis5$xB7%M=*l~vv8ei(g6Lnb#(b@}3j=6+m9OV}9Rd=x z(}YiKXiT=ow$6M`2|C9rYzk*X;0mkQ5e{M~EDq9y%>J~Ri<(QKU(mJTGm44?>1?lI zvV!<7R{8Ddn-;uXcSO~O-lBR=h^KYt?aagPLyQH`zWh@-K% zGufn2v!P`2a!Gjk&5jgQK~%J?+H{_bYpvfBv5g@vx_iE#+X&?p6>Z&~sku6NWp1ER z4ZlaOw-rKq*py%z_Z^_~I%V5J9Dw95iKq4t2}?ed-l)7w4wnLRz(`-ViQImJoIXGM z!wZ-Bh%ar}#u#3|uX6`CjQ2Ns19&9b=2|M@f1Z+?i`d1_pN67RwTLWw6H;lZ&lXM% z==?kA8^D7MpK*s^Zy;#ehb@ALo*rGY4USsiO+(|Pv=+Ti6Tjk1h+JYL#EeLKSqB+A z!rAw)Pw?I(7nd2#$+WQpL&Nagkog^3Jb8m9Kzq`CVfZ$xvd65*N@b9#AiHi|_0 znhl2+=#s_i|5AA4=?*tpnz;ztL7=k?+&L}5W#8io3Rb6(w6aGq-&hCaPutJ_`j0_A zT08J7jUbVuo_hcKgnQfoOq#er62(d_wIJKNuYl0_?*ZsA!=c$kH;A1h@Ym)O7>`mXBkB`6B^?U`~I+K%UH={*7 zv+3T`Yg}$~_#Wmmy{9}!MMccfcl}X+`%A6v+P#}fF z=iP1w`CYW5g@l4h&LbLQD%Rbgbn|8AYz&=%KtaHIYgB1ptHrQ82yH_tZCIdO#HABW zaW@Moy$wV$fC~X0bcCIpO(t^32s6G{*x9fhOp!Uc2c2^T`%X4PW zjYKUWrRB=@qI8no&$mUrq6C2W8b+zDDyLhRo|$WZ9If)nao0dME zr>z@IUJYA0hL;!_H}g=|W$B}#LKUlB;|_VmNnJs>%7e&CQ zS{;oCeGxUfeAM_Y*X{~VLz3jnYO~F0zN^u6=H~ZJR)}0b1_3Y6>y|8~y8VJPg3OX+kq9N3Z|D)& zxm-1~Lub2jMv5)nBW{tM1rz33V=&lm;_FH%H_p}B{@%~7zkL;F3*J805rZfMZ;urP 
zBBl$}^aq1uS95TrTuz(+qW5phGQp%}CLnATn8ba@7?~NTcTGAZuC4}wk;(bN3h=!2 z_x!}m(fv9z>O^Vo;=Wgl-^saX`n5ZM#juETFRKr~;%j_IU0wX*#SV9kG-~ZTYjp42s`i5-+FlYW3|&=i)fL%LJWy`E=Nw6!-10!;vSB zxP&rqmVWncLe&MkFmuP0jw|Jknt8FA=sQwFXunMwy>LT5!`YT2aTF`e%!-te+<7yn zw4*Ad%X6%FVN`g%0V&=jOgWK{DOn3U55I#jOi$QeJ|vyCGu4IKK#@}66`5K|8j)!b zo#&rbq4eyT{DaBVVrKeJj(!w$J}#P~3@=SRPNp0fn!HE=(ZzlzBT2;b7+Iel&(_); z12apQ!_DHq>UuRjWdCv$DSy$ypbSSRyAcL=tcNvCD?rr+syy~4V?a2mARhY4mlHBZarb_%Iap2(J>82u2jhq|;%|)Ys5^#o zuY80Qwe6s=q_Ofs)s~T+NM+emE&S!$a)|Go;mrHU`c-ITlk{s$t)arz4A654=j^pT z8zpNv-*^Nu>ZSJ5>l_X3la*_rT+?4Vc{))?aa#p-?A+VidzmsF{Or@CiJd7~dC5rJ zsh^a^-s<;jW*Fz~BOtz?iCMv}R@I_Rr>)1pKG`}@Tm0U;P=9e~EtrfhgTq)_8Y7CBv)DG4U$5aBwW$O!01 zs%+231>amej0?QpM6_n9-cnC<`mC(paj(V3h7DSCa~cGNPRem%I+3n>2)tN|jSbC7 z=Hl`;V=+nXV@)=t&NMHOkqFxkW5($kwunbZu6;LTV^gU~N7RbPZw`hW%T%Bu zw9--RIsGEu1@X!B#cmL(Hyt9{@kVVBXd`T>`|cww@(`TR=Kn8%;yHRm&iJN8abPH| zku;5KEfV@;v1F;-(3ND|9V6vDGBQ^$fkfJp-)*Hy5v)=LTePF$)Ui`9>U27dTzMfG zRI2#Vx%*En`^=>J+DW09Ry%mOq@@~c8z+XWP)xb#tpp5rN83QTiSs1A>z z%oEz=uLNHUn&DNeb)qUojUX9;#41f4b?DNUa{+B_chN;$^T;s^PWBUV*)#(2G~N&d54k1m+F0392029x zDdo9g=<6xII>c@GX^bpr@$x9fb|J=fGQI($1%J*IV$Ed)>{ozZcswiHDzPQLU zkWloX?`L}r?CXdy^`YZu*BEFvCebv6%|vscG7v~y{B0*bSIyM;71a(tyfRp zXQapSJG761ngQgu7RLrifpj5JKZk8)#mDbrAp`$ACpULqL0%*=UjeiX5W~HE?-ozS zAoU?LnoQ~+d2_(o1Z*O5K>9J{-!}UZS#op42PAQBF;X#gbC#u;hSlBB-+CKn`aT-B ztYEXifLhIgXBbL~K`-#xi6YUQa_`W~MEdUNm(s=Z(=Y=H0XA2{>KA|bve`J;9F|Sp z;v{q|COP<>hZcW{qdr}FuK$V9zx>H#gCV54gG|ycoY)JHNuiRa0^?Lw9dUHJMhgT9 zU8d7bo~LCcB}K($oC+%)(evV{8D0Y(^@b*>TVUSj00^W4uo5;F7qnrSl=c zSyNY3%8%l0a9kv*yd7Ev(s%4_trBmM^8L`zRrL8Q(&1_P>>Wd^ZMc zd$RG`9U#A}#xUH?AI>D-(b26-yb<>sI%*(2&Y1ORBFy#-kuBorn6Z5Rwbtw-g8R;k zzgJlFKUS%!ciKYp=G0%r(*;-+aLQtDnW29uyp(!6c{|a#sX?eq6b-qa;`c&o6f$nk zHIK62MTWo`VI_Ds0Wa5en0A|sA1*EJAHQbI9qw+up#oHVJT|FBTeeQ%tN{!JP)`62 zAj>MNG)O4qc>_$GNwMjktO5A5N)ObB*8loaY%Hm&I>pXpMC$#&Ia-YM(@=%9dmR?w z;zAl!zlZtLIHdM7$jbK$_p#IYtLS>Hh`gdvG|LENMGZf#0 
z45(U(-E^W&U3a3u{@nAwC(qG5Vz5zz-n|eL>qbX@PZ>?bXAPf&T$;Dg$qwBJq$1P*eqD2{J-9ep?O#|-!qF* z$I1tGor+2#@OFK=mnnImPG28oL_B98SB@5L{rs`sfMRjt%}~-O=f8lTnlt=^Nlvd+ z337moxs-&Uer1+hpsA1LcEQJ6HDtAmp@k`5lh?kyGcn6R?(>01T9OfkK9r@Vm)2< zC~P*&I*4XPe<&$E`VJF&X|WkVX#DbJL$7NByDa=;qhp8hYv4^g6A-b#w1dwK_ovka zRg@Au_te;le0a^Hp}*Ov_$TsTx84Ui_WygP@U@A5wz*jRzrx6)Yr`Ls{{Rt>9u+>0 zXaC=-*kASW#3ON@U`N=)D;C>ov1ib&{j<}KQqdZgScJD8o>jD*b*YGcT#t*~I&_Ol zB|pkLE&cDjoGwM+6}Ak{<|86H3}@-&NB`$Ok)r%&8?z$O3+T~h1%Kaxlo}FOBm6b4E(6Tt^ zM-aA5EHdC_5nIXryJ9Eq7#+TwGV~y5Uhh55&q73W*j@=)fXtKX(9qK@(3XhOx?QKkGif8W_ z8Xe_8!Akw0t6J@Qt4)oH1N^)Y$j`tV-y`>*jg3si5wb66=$}1|!Jh@4z3Vf0;sq4H zA{8@f+ftNYZMY)BYNeBIxstS`?bG zyiu&clT^(_kEJI`r#(B@r8;Fzw>fMWa+@uX4Gwrgi`af~Kw7s($VqB8dypY8>2bM8e5#u>mP zu&1yBGl=))<&!jSd(8&Vzc)%E^+)wOH_eZza+I=~t>^XCG<0ZS;if4U1`mCD)7j1k zzsXZ6-!w(~Xvf!&QlJJajy5ox*!cxf;YiPhnU7_A1xh}mk+Os*Zx1sFJ#f8By31#p z`r8RxZkMvG+dWuasHMGuF0mdE;*Q!iuh@=u?Bc|>4YfPbJLf-w#ogf^}ft1Dlb&w`hlHp}H zrYK#uc!c!W_#}rbb$tsemA9}THTlYHn+ETnv@;ur>|axRxP7pYm^?`gMX~Uz-`sl> zbsybAB^r*YZK;Bai;LGeFq(uGw(BU&U8OwLsX<$;Y&N6fiP47_|35r^bzD>5|Gx?- zf=EaTNOw0#Nq09$NZ06)R=Sav9F2gabV^Hijz$=b_4nMxi zK0wW5b&i%l5&IsprQiu<7RPdp`y57&xAkKN7&+rghR#g8yZJ#H119rnif1{KjieQfIP~6_=(JTlyuL1AK;Gyd~#kkf*XNhJ(+B2-#a^L zJMB0ug*E5@bRm;N?a%%aTq;%4LST%pi@u5_Tv?y7+fY(l#q;qki zbV=(z>4yR>jN_AgQpul+3cErH?Qr6oJ@w(`Kx+&t#^UG#biG44mxEKv{54|WTEAVP zX!YBJ3ErQXPq4xyw+9kTI~@&J3A6b8=1*1$GE3r!xeD~E+dBg;)ULZF)xw!48AAGO zS3GmMOkau@i&+PP4Z1K9IwI3&GPlDD!kv@67>-dEjnBV#`^78W1p$cJWYz!wC zN4To0o+Jc%_^GM_6yM3@fbWyZR4D%D3Mm7ue)_tX={%n$7UKd}%UXMk@mS^mA=z|v zb)#w&$%KmL&Zh? 
zH^YcH^lM!1Vo8GTA*RvM&&ALG&!Hk z<2#pi=I;4UOlE+m{x)Nm6VRo>Li28cWQKl zPXDO#ckF_X?=(RxpX770!9?1j80U_W$~fi@694$^>?4}6cbGZW2@D*C-)r#nu+<(c z-Iz}rZpEgmnWxGv_ZBb>Jn^QSZCuG1h+maOw_5J^q_U)sBmef^-937O;1dw2_;6WZ{$?tc1rrGKXg@KLg+r&q{S!AY7G7_$KcoapTjxt{$V;2 zk_@{|Ud5%6YO!k4R@QB-$CqbKIu12i{N7L>B20gIkbEkvqq#526&yd^|UjaS+V`4b&7-POt_Rd{T2Po7@XD#A}q;od0Y4QhU+XwD`{PkfeF>~ zOMD;;?O|K$j3Y>sDF`%H&c`w#cK9$n3^;3)*g+HPQ~*8DL;+buP8CPRUelZ7b&@hvRd8iz%0Pur*GSeaZ$3I$ zOvR|niI3_puw`wmEtk`DYTMe&ebp#&UWN5aFJM%pO{%)}@9^D03n_zWARe6*H$F(Z44!Z{Q8UTvP7W0v9jz3tRlVcSkxU~P z;a=5hVNfNsN+FI%XR5s~HL|&6;BrUqE#T4?$+J}Szyeg+B>p;?hAJn~Dx{G^H^TN| z+p@VUrXmd8MPYuB5TmegcrfC(j3N?};kY<&S!sCN@_$zZYhK zK_3>9kHy^vzAuQPTD0eWeNE0^Q1v?Ci*Sgv-LzL7O6`3KB>7xES)nKZOheSiFmrCZ zj4hrvV$rj3JR;`3B=%b}5;!zolL;<19yN;kS;?$5yTUUYSZEclGpUvuL7DValB&nr zpcX}cG4QrvD4s!*%8d5htZ|x^W|KvYRYqB}X&TndKWZO?bKN(Qy%diDz!MMZ%ctS- zNo{`5>nhAr=p9Qb$vT2sdAOmJQ`I=qpX!Iuk-qu$zZ6h)f&+yHv5Gx5wIwz;kLof5-)^agX(E-<^>#E%ZcQ`23%huMONjoeq}LW#%CQT%HUqE%j8ZV?*>sf z4ZG%VI^r?8+8&Q_!_Kx^<-hwc%lx7h9ow)nC~aOp#90E1TNl=`yjAEClfOJf2i!=I zOZX3ihPdjj_pPRnR{>OGFdV)lJT0u(5Ey@l)8*jCTqkOKdB`uSDFz{Um~qSJlw`Vl z_xSt+i66c`ihn(Ev4&PxJ!9aYbOERN`xh~RLT60VyRDPXgj}n*hD{vz|5Hqy3>Xrv zCY^GXTWu)j;<=sM>I1#e)TkPIDXf2@mueTleRGKBsP+++fE9j>Z^H9wT*$EY@iN(4 zWP|(aZ_J)3e?6h(xKlBVa*kIG^iyWOB>4N^qM7&YcCWMR&d)bnfzKqp-!c@V9FP2O zfZv{;P|8O2MvZMZWtW(WN7Fh=zP<{0LsyP}(`ybgAsH6?U-s>R_UB0xX<^@PXwo7! 
z!*@M`k&!cfAZbZgf~)JMG_kq{ysQV zXQ!9^YgZ+HxyM!l{(n>Smiu$EviaFJT?_75YT70K3=r+y+YDT5xVT^q{$wNzdl~ED`W^;?i73yJ50ru zEFTfs7D~xYA{Eo1zV+n*Lo>r#-b~G6JTI)#mGnefn!KA(&1r= zwJRIzf32JN-(4Bw>?Xc@R>*5_NK;qXg-qh%%%Ocv4)@((|8nGKA9vbNNgcZPKi}s( z7D#UVJa#%-tcxEFU}qp{VjIsm{$ZGWxF-q3l`fpcKlXd1R6R(Mx|ge1_1_+cL_SW zY1M`u!8qJj)3Fc1fhpg@2kx#87j(7LX#VG%yq9Si{i;Ub!T>#~KG~Ea@n+nv54UeC zA|@~lLh(IjBP1q8htFxrM%n)u4@mvDKe}yN{jveeFa>P(+YrdKTyC*c!IuS2?{`)m z=6{+gCs}2ksK+OpWyfVf)zjAOwkLc0S zX~V>Pfw&I~5<~QvvUgk#1AGUkwf6iP&Qw%ver+ zCTiU+GjLVj{C#K)uo?Q|zUV9=OLq=dG;ybK;9h*SpGDq+vfo?XU2urZG&y(QxF$2R zu&|C4#R5fL9Vpma!8l;dB0+q5QaxyHIwOO!5W*>%o4Xp4@MO#BZys1GkeWcKgkz<{Wfa zJT4jl3+C;ybwuF$LvVQ}v&>7Ns{@>ly>2|540gXiFcpVJ^Upc7*E;Kw&T``~ejOiB zy>gG%+;XwM0P_0FLpi+OxIk<$Av7CR=eStqE0jMePw}(feV*Xl%kBU%(EWANO16oG zS{A2U-=02eraXP~wsHpV_AX#ndhBiD=?Ik*f3O2Ql>wfG#n~R@wDYWzv@XbtoC(#eJ5;I!yKi}}Pt8@X@am@BKX;9j`3LWyd(4?aXPLf*7d9p?v_kNZ;) z-=x+pqku8q0#MDt2%G7#%gD$GoxZz!FBw^K8=Oz+O*X>|8UY#)kwv8y<@BNQ^c}}E z85st=4FPO)vF?ML5v|BW(@K-ZfA?6^_$K&o9 zT?(e3dUaHYuZp(idep=dCFP;lFbW+`Ijv`sg0Y%LlTlO_iu$l$$>5tSgw_xFOLAb*z6qabofE&)wnVF^r z2l6``4W)hH{EYA+Qp*()Wg`nbI88`Ram0$48OY&3>xZrnj}IHP*xv?+fzePTiXP2& zH~!W2!a+_L-5qExDR^t#jh3+(67jx1obC|~!8n^IlaLkxS^L%EPYlx!LVY z$)MK?P0?!PEjetJCy-kUJRzrqWs{ZANPn1jCaVn4pgN9!xih(MARFZ*-12o1&U572 zQZz&>mwiBmnn*Hryx~m5WON`SPosb1#WF|?I;&^{ zB$mn@$^nT@5U|`avV}3}tm^nXL|v_Zhw~~^`bP}j>$*mIZB~c7gW%8gt*v*$0?6Nu z$81LYh|4t!EGA|^zzp#7oaaT-B#7a^F2$8BFZXg@Y!)MaC3e2ngQBL*VqbS zoECA}!YK;n;_@WFl3alz#N6w*?HE))KWooFyjV*0rmP%{_)6fBo1661Yn_^!f$}cw zO6kQ5WPR+97eO?*QPa&JqLThVu2qsWm4f5QKv8b2hfghcOYBYixNBj?>9h)fc|Cn7 z;Lc9KlFlU?kcr&Bb=Q%>dL9f^umLqItYTUmbi=gUsln5G|3HN#v6~HY_P$9fK%(|@ zi=Baj=-CqV;-T|*8e0}1rYFO6_2OA{0jpvAZ&ydj9|eqoU0|F#-z$uf#wZA{{>ANo zi0%&&E^>c&Y%KqVD)H+Yme8Oogn99RllhR~hI4}^?rrZL7DT-S{5!Fg`J$omzVvdinAiPp{|!E9xFL_FX& zeky5BVz&{`3p36=e@@(Ajzl#`Kab8 zVe#!d2oyCqGnZG~F9?1(!7c)guGLbd(ACkP04X94gP^IYvyA2bKX5UvW~1o_OfCXU z4R_mM{@c?LrZEfJY3vVQG?@sR4P|}TpC&uC`%SbK$N3FMQYxoaN`HKb8mF&i2u2Wz 
z-L!-Dq`yV$fV%?kceblL0!ChN9N#>In0dwH^l*97k2Km1i*-$DQ=I5@MR(?1-LP z5L-V(A7b_P^~aT%olanKU04mXzKy+r`O=%!x+-&cbk4%cdGV;pW_s>TdwYF2`WY+& zI*@gVL>v>a5T&_L5spD_`lKjCE|22gSmT3CjPh?&dgDx2%BE5#g~a`obpxCcHwPN& z>x}nOS!+H{yT{lkCJmehdVKKt7mNhphqLhmHiwdxhhJ1S#08Pa!)JC|1o2*&q8y z$98>;AnsU$+u9e_=h>7|M#s@W`b8yU(kgrtQ5@V76X7>7f)+uD5e~@S0isrJEswMN z44vK5puEay_&8b2LpjhSP!{BwBK)v2GDr{yNW2{LrTc@1zfg6ACmg58W(e@-uU-u3i(#;%;@dS_)bf86i?9kKh^RuV&*|*AYId_lLNI#}9XH z3#1Fh|9Xf4iFFNQi@(U^;+dZfTF&UVF+T4%J#kpkS##*?GwHVo*e5~Twz-}8HEY&v ze)WtG=bg_jmW+)F|J`s_qyNot>?xAjd$Cy>GRj#4T?scaBJzqE#&3pt z6&XRjkcsh5cz2H-G#0sxz0h@b`AtZVP+3HDNnQBKqsrt6Y9H4ZHS<3M2r?vMsk&&{m z+KhR?fGYofHZcI+ZEw7H!h&ov|xtNfH8ib45YvxyF9igj)p<1zO01VFU zgDJh0jhGuil0})9j?_NRmIk*clAj$3rC4ac)AmK=<=X`H&nG87d`+z&Qix_QTVB@7 zeb|};NBHf)viwWlIegPG6|7z_ftlP~s=?6e{IEq~S3DZz+F6X zo8NKL0b7=rKjLJZjsgbuus)vm)J9i?B>sH1*ZeN=X-Ud*Hc`pTInXoZmt15Qqk-3x z%HElY|=@3r4lFC`wH zjzRVG*p_n_7y@>BjuCnE=t~V9)(0g0>mAVG`pXYP1EM5L`DXf<#kawqK!FB=6he9zHq3)ntvGR+V$?>Q1ieE+#R<9wd2u&|!7=FFMTTIr1}Yg!S%J}@F3 z21FkJj(xDMA_3K?tVZ(RQ74Xm?O&3ynEmjFKM5hv|B2U7N9>@Ebi4)0(U`ky$bV5Po7dJq*9RfAgD3W_WyD z<>2lmCZ451E$d9q$7P>{Ka48x2acZk8H&}e-pmuy&d8DF2R6nSQu~b6^kFlUG_MOwrY{4EmDt_E=Gwz0#oA zNFz$2kE@M-| znts>m5riu7`L6=3#(<(;dzI-XGHcLD~Kh|rxnm!*@S%Lp1WLeP{uz>xFsw_p5 zEvX`hkt>wn|>x$UoTh-wMf2uze#y@?-Oe4yYsQkUe)q{fiF}o$$aaj!y#Yv)t?p*2O z%Pd?)M{^Gpr4gq#+)8p0d?PLCCe|XjVtidfKsW2<$RQ}61_xI9Lqhsnei!%on!{-n zAASLn;-OFl^f`Ywk@6S4Vp2^^RMGT@3NtZ!0Z;jGd9=aGs0w{!!C#k5yRKrTl{v_! 
z50UTuoo5b=*WxpzbyO7kuuh6(g%MxMf4FgfT29M}{%o}it4QI4{Zh9)PR3GO7#tx` zm?L)!FwyF?CZ*;xtlQF{Y9<`7!P6L#FGu8o_SPnquF;MxSB_VdEWxIle1&DXL(vun z_MDajg~9IBp+9eD)ic-h8Op@y6?C~JBF&=9??Pnjzj8)F(%!zxs-z#kI!c8}3 zL)u#OLmHh9$BRfV`5Ud6^Ld9cDf-Y>o|Kzn*(X{E#Yc21Zk}HN0&CgeSO%SHgxi9Z zFfR9d_yK106IeKfG*i_y^7>PVUqb&TuD;){sBjI8 z3vM5&Q{uB$`V3Px_ ziMB+|gR6IWq0wp8lpUlb79~~$*A$eYj4UiN|1lDe8BkxM@-^W^MOvbgN4y9w&q~D2 zubF^^yW_esaK4e84p0nGIm#hQBaAz`IGDjKXUPiwMB)0YwI}XvFK(B>#M1F5<1>j= z%`v|`FC8IVNh}$3x*pe*5As+gFS`kXCOZ-^#&v>fd|3bTf4MBRX^2mGMk2&^j}t76 zxOA*eEHb@_O5;y5q)o1O0%wf06h7}w;qE5q!#VHK?9SPS1@|(ko**2)Fg1{u>V31h zj~au?wyd>rb9~3J6()u=gVTZhNcqp~J)=-y#OoY@C2yc>ZLKRyyPdjV@}shHXl&ne zzu0tt!THPfV)ReP<+@K-2Lws9FT444+BSkmrh9PNv+L?|E_NpxK*9tkcoLtPqrJ%F zfWxc~UxrIXO9THjh`)V>uegtwO-F}(aO&#sx9?X}9sp9^y7;qA76Y7jG8pq;Jq@J@k64R35{%xxP+xf4qK7$CSwdxnHwtS}oaGfUU2I;;;GYWraA736X8_q3#LZT2lPx zDh$=nwz-pb`lb)Md*|ag^%1>QbR}bAx}(HoD7D!dq}+in-)J^CR_1xA(BK_dCg1f#V|%MOXvKdLn{5njk26a zeqwA-B%5sp)yN+O3FtoJX$@)fxntL#%f#mvNQCTh5T?5XZI7yUU43ro+Y{MqCt@TBm9Putx&Oxm30zB~;9 z{36By_Po_<>s+B+0_2ws?9%R68mken>q-hBq7?jqxVma+8y*>mC+YG%9#(i$^6Ie* z2U3|)y}o!Xf>x>GbpL1vP2d@v?-XNSOHRJ=M~{mVC1n=l`iW}6O(a=jsi1tJsa@~Gt+C#`m zZlG1(RZ&uY^F@ZPdd z@>85azo&TF@G+{5y29N}ObkpG)JqSCJ^+kPMK7=Mf4GIEga}&QnN%frKfGSCO{de`GEra#?7}j-&HeyF<8Si|7_Hj8#h5Np9C7kRLs&WypNj3BqKmhPX^xJfO zc5$$?%Nx&`>O8vnwsAzH~5g+7zfNQy#Drlm%8rFc)KSW#J# zBY+H0%`Rg3T+2}9Pc72~! 
zznK<3?OZEW>x7)xI!~BD>by?NY(+Izo^-Z25&%`!s`7Hk?t(EPyMXgH>|O|J@|DGz zr@#6$OCUV3E6e1%K@8)6IxSz=E3)*ymV_>5!<1;44_)r`&>3swsukrZg1-!4zCbbjh81N5cT>sJ1~3z|F!k^MAXeY z>j?xt9jQjI;f|#L9l?N}NNaWbPB*zIjfRnqTj?05BghX`v8=rO#ag`CV`U$ZZJ`6V zhrx0raPk$})ffrxym+fVr4Tm;?x;KX=f3xjxIXc+^UhYMe!!qY5Al1}_g@3?$)v8g z3CdAPkW3mF`aE)n%u0hTMmS_9S;pduCEWh>R9$=}|7SH_CO^V7MY9@Hg@1y?Hn0rb-fHMqDs7KQ8ZYra1S_hI)~u7tdB@7e=2n`7LwNwW~$iCS~=DmS3!8{bfZ@Bzajo zt2RRwyx9B8=L0>z9`y9!=6oxJoY4bMu?z&s#_a2Q97t(%+7sfu4psjrM>%=*V>`Vc zMOalMm2-OJU%aJ^N{dWyjWU&uVl56fiAriMsuvw#Ha}1E^$B2uXbxs77u2XS71yhw ztH3&5#A87D#SS0;Q`Pl59y+;aFpAmKgO-b=t?*WQZ|-C;xVxWHKt;Z9w z|4HcNS*YqiVa?oDhlzIv)q8vVN{>>fUE>%78A!u?e!!>O@o1t`q60}^**Tz8#3~pa z?!JPTNxs!B||CVi26YSZ3fX@k2_;FPMBkd<2d@ags%qvfcag zX7c$yWj=;VPRR>lEm+HO4RD(Bwajqz0Z!=?YxB(uN*Bzmo^2;Mt7%>`#aIpJCGYKQ zlC?=}Qn}3C3lt^Y7{ncvZ)gQ_>81Y#?Am%i^Q~d1U%zk^3Y>1OB5>aB%}-&Dj@El* z4sYNO3VD+qT2i2SFboQ#_)kh3XYDoJuA}hdMYPPH$24x^hic)n5&>G>z`pPAP@#uR z{b4T9R462Z)xFR8{+f@h=Z888He;EoGuHQ(y$?&3UU~QA!lM6PE563V7GM}+s_TxR z12C;JX>;T7ipM?p!P}DfHT2=}nxuo(SeovF3$j^xO`h;-TN@mAGrSDE5!dKn@{Qox z{XQ3cE9EtH7U!Q<#c@Bm$C=G%JtDXG&r=ySzN;Hew-4Tu*99q5g@{N=z+xi$*Escg zPq&EEDPW&}-24X`FLx-z2sy&7UpUKr7VvMfsRch(P_&LMxV;}hrc z(bE60^iN9bwz<#@$#CG&{I$*(?H0~T0(XV?s3B|;+qiDMlHa~<`30iPfvA-vKS>uz zT2r&~twXqLZ*Qty#m?#LRfZ0CpERlLR0QZEF_!C6e|!AreG91O&+b3{CjsQX%d}!L z?o77J-22{Om+yFM)@p2ii_mp`?Ea|XP&Ei-=MMxnM0edGodu==`2@3}Q8pSkB@V3w0`^TDZY5rFc}OMhY`<~#)=SYGsmsiK%V;$gmItfC zC;`o*Y|A)sKpMj~>VAIa;DD~wu_`1HTYv-#Q{Kk|LOiqzb*!bT8s31QQil;bRs6Qg zc*n?pSDUOHs;5I+DQhcsVwFGzjXjImZPlI4DvkADHc4q^lG{X37FfOaaOi{70W5;A z5IOY3)YLbxo)3?Bx8VLHM-Q$0<$iErj~7K-n1VS@{3dv@kpLYP<5J0Q0(8BL; zWYd`g(2wP1W#yF>#VOX7md~GkU2S7It9%R@fL%NYXt*AAPj|72x7ZG#ZK9I5zE-KP z!e4_(M4z||)W0a6Z9H~f{g)$-oIu7=2n;oXdO6u!Zlb%y&mMp{ki|RQ2*9ta91OMI zE%QtW-d86ewyVGo4^dsBe*67;1KYK@y>iH_sh|}<7zYIsBHo2)aOUToH}&YYg00?X zCBy6gn~JZ}N<~eur!~p4-bj?$&hSN_RHBCKRl#W$ z05m5wJ3vAN_jIQ1G}i8ypjuAg`pq5In?W^IT0EN^pA*l8wa>T3dZ~IRtyeDu0Mg?N 
z!1zt((`H_l(}RhYR&T;-`|*B|n9uQo6EUw#jg?V!dF~OzV-W5_Jvojz{Sb>&wu=1p z?43ue#W9M%*@re*_^$23;oYH0H|?}k9!Sw5YM0{DjDy%(qA9{GR#)ek=?@37h^;($ zEr5roZmahDhW}R4{-*k8XjP8D>c3UMyRO>uem;Xo6)Xf-s_sBcgDC=R|7}nKMu~_q z=hO9OS7Yvs=7W(1DtGu6<%9Fe2MzD@#tbOk)~$X$u!{CA^mF}PcW z()}Oh_q5%hIck+Hic`xce`3O0HrA+hyMINMI&Y%%ot!v~7nrDG6=F3L--mI)^fKsSE?LN|q>DHnOJl(tEYmp z_Aml`+ZyLNdRK^>clZ_0N>0&-eV8@jd)qD096t>}PV@XU0gHlP)BGnenE>?gu+w#> zZYPE*U+<&W+#{sp4-#=k1QWgKI9)>=zBq~NFVtdCLq1&G0`SV6ovWpKJ1HarMHyo6 z_>;kjnC#U%hanGO$bndy=8oJ6~CIsxBySoqlWsdqUnMhrQHee&wL6hUemA?w{k ztAn%u$-3IY{K7|%O)Dbf&$obfPzKm^a#7v1uEP@XkkIMBGnY9b0GQAL+z;UQq=4t* zr>-7|6TG(Ihv!v;bNj6ucSZ{B%Zq}?Xd9YGJABj%M7aQvs79`Ua&zr2ZR{`9t9If6dwTXRI_#D}DOiONT88Y)Qj%RD}=CN$Xjq)zFrS`Cy2@w-h({%X(Xu$dA zc%7zb?xKdFrb(y6%q~`~{t;lOxyNGKX$`=L&yB6BI}zpq0jpX;NF(kHVs$~l!>%B6 z8HjS)fsS*~S$`>jtF9}!@y}#sk)&0<)_QzY_bip`*%aL`$!6$d73=vD^vKdje7T=r2;zta!nS8CL*5AmTl`=h}GO4Do%F zgJ$)!q~zdAz{K9x@takcJ9}oKu`253$D%+Nj(f;M0!aAyJ+~r^rA~%Pbh$pOt+}zH zSu6<5@|>oEOkOH8*_pgNVnytgDA@$ z-nhanCDu@Nd>xHhDYyhvaqZ_=JUn#N?K5HvO-dYFd;1hxpMgt2_X#Fv)ClT?+K9i7 z=n4Nsi{}bp@dy9I>AH*%mhFl03zMvqe!gU zzwuf1w?+$TrRG5USqV;X(k#+7f+xx~_id7=rO7L3Rr`eOL zM5_!;s+X;7>g4GvGLt_j6Ylznqxy_^e#dGkXx{BY=H}eI8XIg9I4|c#YE!pwc3{g`!7HOXl-R${QNe62HQr;^yYoQ5hQ^ zU2C?3TO^1Ul)Z(1)JTbq9aU${PRz|OFV_oa6%`d_W2n9`tQNC>W`IujisuCOV{1VRr=XRo*^=88;OzxnkP5Iwmmj{j_^BdbHr0yX zyyND_T%k2mM{GetVvuiUt2XCkEI=D|8-VygslTqi(9Iw(vmX)L!1I5%*x<;Jait*(5J~ZbAsh2 z_CE-x%RydBX1LH!dj{fL*J)nxRm{RQ`S)rx|5NzI*of2Z@CtMtt0ZgyEOcARVA`>f zmA*sxx8v$B3m?H8)F0k+nLVm6H8XrW(%fq$BX&;H(gObJ93VG7N*}AKK%Fhee(z>G zzmLTMhbZ)EHF?ar3VS#l3krA4iYEiGLRukF8XguNBXkW74Z>I9072nR)OQfX&kvCV zKS+t0SJ#iE33WTcH(;QlxtKp)6LUJEHC9tp_C44iL|&Y`&yT($>`VBfpC@4?ND70ZU`@!e}84a zt{jKT+LTM~;=a=jGDn*BbEFdn0e6X=`UTyb0kc6rk?|ldeiOi*nx z_hd3)Te=3E==r(rbsnR1|c1BZoX6 z1g|2K6QC0v7lSb63~r0t=wd)_k!7TAB?=K}6v-z$MISBh@8u<5idH7)9z!&0`7S!InSHBgV zWV`@I?vRsuGiFjRPhExKF)Q=9-KJu3Ap8b((t%qlNLlw%nv=pVG4>$w90W0JE%lVi ziJ1%b3t^)M1N#|x`Tf%k>^i$1={WaYzwSUHW(u*Lmg~hlx2aGX(l(kreNPj&K!dKn 
zJJ(LOKZ5}F4G>B<0jYg%?x#IpuvkEK`&L&PyU~xsp$g<-D+>qGUQ_L4rNww5E&_4$ zTd(W<>M6?e=O~*1t7jhc@jgNvVLg$%npxsH-m*~|w9h(HH(zDseh70K8yix>i~x|Y zf#*x@a_<8E@Q$(9m21_{wk$)RM-%)WL>GG0r2P_qmUjBQS3)vU96vARRuVyQk@oR^ zhV;cNiu%Bz<=}!qiY^iFgWF&19s48TPK2wWtmQ#yMz{Q8Ejc~caD0lH`x{eL&p1Ot zN?r^8)bB=!)H^lOQ&O=WJA2y7j>ISLvsU7%^=COp0n}}--*{LyreYKb5~~292*n=| z#@G)hCcT`74(7+}@W{zDf0q&3z24kWNp$dpxHDoU>P?Br^@7cs5A4e40=ni2!xn-9 zA=&06Rj=o&AY932)n&B_>~mdqMY^zdehg{e{!>R5B~7oEeZBZ50Rf$Zo_1a!gluwQ zuu_VPy6%+8r(q~pMeJN@8Hs!Vs#OjF!?xS3cV4|&ovYAmzq+pY`k_W?)cdMcW-3kb z?DESg2F|c2YOyl<$-HBsv164mFlQf#WXm#U0a@jb#0ojW@x`UGfSWT1D@4=&V7JjT ztHf0B@W_GP@kiBqr_*5)tgHFozzD_XvxKDpcjexCj~VWtG?w2{Z8EE`hq|r;ZZy{a zOsTRel0_^m?5z5MQ!;)*zuxz8T?IWA%%&mgUQV|KeesAChIJ{_OM}*)&Cen!-Y@=r zjH-Pu#A64|0pi!(LRBZC)uHQZO9|wTtI=(;#4)Bgue{}U+??fv%+FgKD+GUnlxpfl&s1e60o(VB7z=G0!yv6L_ z?*eu$a>H*OV{zY3k5R)DXL-Q|jJnZf<%W=R$I2`X#g5in&|wofd6T&h^FOg&i~ksU zLZQ7~le>$qwV-P@)5$Iy zo8pbF7dC+-+@lfVz6IW&z-`s-qN2j0NvS9L5$~)qCHUA{K5t%Q?xItL+TlUiTmCuj z!rmyjl~;V9h+vv@$tDv*8EsM5@x00;h#?Hh#lVZqOIiC2_jY%?Ks@Y)2z#FZiPs;o zL@5Y)^zL(cHib{sJ!hM{Je&j5Gc)Ox`gN8|J>4M@xj`o~>+L7IIu5$DNv;F5l999u zwfeKJpS`@;K||4+isa)V-QP-!vZ5*6NCH9p1hzKeUo5;Z~8}J}bE?S4%tA*Xto4oP;zDsB0YE-Dz`=e1_IA6){hxi1Cbm-tsYASKpj4Yy_vOI54`HfEa=Tx+*(CMl<;SCbh*y6 zkd-AxK@z>XibKt{9TNmQS{paw_)JRCY_Kq8UF&gJbl&ll!+gCrF9ncAcwI_>_4a(< zh63PY??7Q$?(=9NF90tt*GaJrIev~J9l5p7i8rAeyaR{wNkNF2Gc}W9QJHy#_lOss@cMwA z*PT;#qv{sC&OjKkwOD69l&0fokYp8wN0(f%*#Fw=S)RTGPRDCbc!M7-3s&-s5 zS>J!s+)gI68Uq>s&J4wqbbL>;#AkKlwtd`17#aSZv3^Y?b+Hx-y7XR`dL^_n&LoCz z_3F9RtE@1`v8&ptS4_WNv-+Q)@thohzHtuJQ47rSQN{Lb5_PN{?e4<;u1|=`$z9*C zT3B>Si;$RtGs0Gr8meOtMNBZ>C#q(7$i;HSZ_COB$R+TJpHD(_RBeKSOr@lf%k9$N zvs<=wsFse{H-Oe}5qF;6@Kxm9dOCa4x)@jP1L9$&hGB3Hsn`_1)CvV~* zQR^mQ^k|r}n_#wX#qq#P?dfgf>{|b~9mx`J9|h?}CKBRrE$Mvs-GoNc*mnbq zt#Wftf|GOFT)MY%*G?K|N`VaL!tLDe-cK@LU2FRxe&l#ijg0F+^f&mw65vV-eu0pO z0!4c5aQmQ=q&Yu{Yz9T41*hJW2;Q@Tna zpaVF=Un%yR*dp$LP+{703`$Bud^AYx9t!Xb6Xh4Je*vytJFc&z{yLvShZ&1iOAmMS 
zOvv87Gi-5F(b0(uay){p-!1^Y5@;7hfW3wWe!tY%J@VEl2>BbHVD0B78$gr7rFf-P zs(N|pkrClr<74{!RP9{?mlg6`0V8U1uiR_qXx6Bt0_OYc)02}EK(u-w>T9<$*Sc2x zHhL7?`)o@nM^i<|z0MsF@FqglNV8Iz!NiR5^=sqs_WDs?=LxY6om}-^qhV20P+0mN z^pmnHudyW1%*E_@S(xAPGkjcFOhAyg#_}v$p0>_%id!RP;8~$c+=Z-vXKK^RdtQ;e z@^Z%LuKoY?o1QdVf6tfTvds_H>Dr1QDdd~d2Ji4ha15yb#Ez86u(g(z>)Yca&@7nJtpdA-GRIyBaZ_me(BvGzf&bbqlxO!^bHJSH zFnhRMi)~O_hl@eY1eBmxW;IGo6WZQz7H8UVTi(0*>N!dQW6MVaid;MRp!2&%`Lvd} z3&L^Jhtu_2qLLZp&9xQ{N&pQFjkDutof+P?RS9(1=r8f#p^HEH-9tjZfkj~J#57rB zXDb)tJ?O$@e$G27MTAJYK`(}T~dGBxN|0utnskz)>q^62??7Fj~^4r2Zr0M}+A&PzIFUk|cfUu6=}`;}|n zb9MVHGuabO;?nSE9czSC{;<%TxbFd4MgQ_rz-=*Dlp(sH-&`hkpSRIFeMI#C)pXTy zQ8iuI_eE(CWa$M&x*L{QP$UEdq(kZMhNTs07Lbl56a-``>2BF2MOwN+x>;)BTlM|s z@B6!RXXe~%MJ8E_P5g&#&rX1=fUFbJ>MaH|20v=L9IvOrE-lu^R0u@VilPyzjf4`d&Img8$(C zr;i;c8>KGtI%O0vUzjGR(r5&RRe_vB=J=3I!+;*R>*CV>!|zJl$qB3A^hakP9KM|_ zsEo%-$(O~&ky&dyq6y!@A>JDkCT9fTMc`mxQSce2Cc(Ij0UQOHh0p&K=e&gho<;II zTi5pq%A5-^W$)yiBU$iA_|okz&b@r%Vq;Y|wImst18Yyq;o3!m-2d9dLDgPR7!@K^ zji$t92ZLTPpd(|;g%Vz*Yg*SDLexI)ZhT*%acdKmk{WdT1_GbZVr5-5-CMq>fRuc` zKrf!YLoO(40Ob3g42*2Ok=@AftEzgx{PD>E1COq-4=3*EXFZK7{8KbaI^cF?+VpYn zQPw40NFf|USxq00nu3Y`N;c02(pU)(WXto5No?eE*ZtV*myL|QL_+ep88LD*5TO+j z9GM8&XBxuYMzNc|5%Z*Rp$2&*Kq$xWCBAp@jS6hurI6}i$iPi)AujHlpU*S~Riv_z z8YYtsdH1vmB-&7XG^Vnvpsp~{ZI0e(s!;sO^k$5dDcw}I971O%=jC_!qb(#vHfXKcA07 z+Mx&L+NvpX9KM{OTkv0gtBVUW%h*7*=5WTOqv3Z;^%FJlrzS8Z91s%c+3Mhi2?ME$ zOYg7Jv~l&4*{`-Ed4y%a1*yid`JMR@uBI4pI1Y?!M|vxN4!JaN3X6v2791)q(uJA9e?1I#1|684mx$!Mli7-H_%NgD{|PO=g5;* zg(dVEO;+YMt$(p||Alwv;MN=i${5mZ>93gEvGC^@x+4~kv+gFf*^l9 z9=d#bG*Y=k)aH@#CZNITm`H#u){ZM6x=rNb{_XzDgeT$czriD>-FQD4%f*r+0-ez! 
ztJ|%`iy#_ok&@-SPUn1x43)pn+`AXxY028fCACkX#7{b|iG)$JN+Q$0d zGjAO_VMa01qaIqrLe7;CYR1%G9)LRmTIP(HA;A|U9aj7jsPvP9Sbi?B#BM;S_ z29Z+a4{mkzX(6?C{~8Ep%Oz##DJ?UUjbc=}Fl3cTOQ3u3*BvY!+cEA?6^e>g3`ZFD zk;GTxk%ha&b@2HnSH7Yzx}r5xSV70FfD?w2`JeSo2ySW#c@ok6S4nX1-pknBtx|jQ zFBPQZt8$Jua&`9^qlZ=y8+R*=4A#YUN_}kkr@*am?1zZH2PWdOJ)nvCG_&NaF_lL> z*=_-rHR9M3;!h+*P?SXqD(V)&|6jPmj^S?#iA-G=P?+^(K?t6>(dZbI(2tI?Bd4Cg*y{< zmH}3bQRTnKv19=Cpipk7U2o2^*po{u&XZ&FeEn%-Jv`f~M#ii^%k&-DV)0}y^ z1c9fq*h`vOD)du(#cNq)A6;NSB_nvb$bA^PCPHtrNo^(9eq?UIy}DfUZk^=jPi+F` zOV84s??DA7g;NBywZ>jaH;?Wd#a$x*U^ZhK8XCZTMj^bx(TxX4955T4mX_KwHRmkW zCaU7q9)3Rb+c6PuzR>}sSVI3OWh_Qgy%(K#`pYMP=kIYhW+tX=4L#lpAfodCvF2p?cTqCI>ENW2-B{rY@&zVDarx4Hr7G z`eMXnu+>F_#GO2S6i#7&!2b-OV{o}b;v3a(?H~h#OS1StJ6G2w%8C|#VJQ&kr%U~8 zOpJ17CdERVHJEkn{X=gm-=$M#{{BHMrs_L&?$MZ@w!XJ~ZubdUt~S)t83VNXK|xud z&ih$6@hsQR3*6f?z#kaV1pjq56`LzjF6hzT*ytNCePik3f~r^uu<{(=bhqL$s(L~~ z_-4D|=h!S+l-y*g@$Bqa;Q5?gOph$b5FtD^VI+$G2k?RFkohY@HoyA?x2h_rIQ8@e zAcE*;4s#I<-^O60#uPfXi7u4cu@?9JugeBERR`SDmclUIlC!ed3Y_=vva(um_WeKC z)JRdCwznR|o~uxL?=5VcS@M~*ZI2cZBn#$WXj#*Pj2}i9s9N{fk@Bj<;;6Y?`zK)s zCeeBO+3`^vY{TqtVL1e9Z|8nXQ2%a&{DNmKh6pO8n{VS~ks2N98XfZZ(MxRXD-+0; zf$CT)pGg5bKGKaL$7x`Ii=CTW7r;_C#5Mj_Fyd0p)+u!8-#$Wt@C<>cnHfVNg;&qS z28WZ8b~!P=#@ymeSNKey*jh{((1HK=S^UHqS7t)@azsjYHIed0Q=m9Z*m7|SX#LzU zzU6Mx0>LB`!zV^YX})pQl3B?bLbI?*o}Ys@Ro?*mj+Q~Id!b1qrHoAFmUil}HJn9$ z87=zFP>AoF!7Ru#eFQ<6mQ^kzE~d`%cYB-BX;S-D5RtB#+4gPkyWv;Uqjy;T4}o(` zN*kXSzx~>9s}6Rvj#xU47{iuk8bly{9i#oln()xU6<_-=s+DQ4h?sK!@4X9EXK+Ae zW~2(q+e~fC1}xR;SyKH}-BdeG=lvyoS7-|$l3FA$WqW((6}zyY;NVFM=`rgL9j)ry z)v@3tTAN~{mo9AwLv#dcw;=}8j?MF*;GBqb66d`=pS4ZO%nUQ5DcA<@M<@otxdW9Z+Rcm0Fc;MFEb|#C zOjyVX)uo^?t^>*9D7M5mU5P~TLuNmYDo6D2&dX0TUpC!Hm~?EpBxM|UT}kFZ*n=cJ z9-PpULIwF!1}oGCn?X*LLhhRXP2i$&v3SQ|q(bbX+Xhh%h6W6%r*rlRC+Dkv=_l70 zQ?+DlaoF#JRL3yl^!wtY`#V3wSIX|IN>0DYoB zov#3~Ags6E_~<}kVlE|9`LBxHn~i?%qhA*+QDMR!hPF>IaY%x%F6LF= zBv!g*O5J|rfRP1Ki&~hx98ANv!j3+2s;bfL+v58CXa`h@8lTP3PpAG^xR9{+jCJvF 
zTr{cE#{CTPt8cVCb-bIMDbnhQlB;%pt>>-VF?An0D_oDU$;K|!`tL@@2kg{Z;@bXr=RgT3I_jOR*s<_cq1>_$iv8>w_D#f=##K+3di~m|~ zgYmb>WPtqTh4g-i`lIr4r^CyD^q1s@q_TMJR%Nzxq7y_~TkTo5SKIKwhJ&*Zq_X9b<|(L7)JK zX}|A8VG!EO!K1ICA50%V?bV_A^%nMTIlP>fn!0gi)^5!{mb|iXE0O&>Z%J=o9~UPl zF*X*~dvsY@QP=VZ_9*u1ah<#lauu$9KunwvmZ__U%KZ*a&U@B&>n(!3_mcZpkrgF! z8C!ra5#H9#qV*jAx|i~*Dd428{@3R0$@ zpGM)tA&@J5e#)zL;aW+rgjUOH%-TWdEwSFC>T#+~_0tQE=l>297;} z+{OM-4Q0G#5(RDn;#+ zrp1(3RE_#saV04|8+$58xz4`9bV~RgRO&5R)xX025QLF4wQ zgWKpr2gO2Mzd>D!3(d^y8|Utu-fi~~UIZrT`!!JX6n2gdM-W*%0>ZD1ZLZ0aix!NFa^^8YFtfuo# z*weMZqju78?!b!^-*xf?)1}_m?U_DP;Qf|>k^!&7v(+|KodU#OT&TzqEZ$_Ng8e{8!`ziTa2$S?lq z0n{zzqr;4=jbf0M*Y45-dW*ag&VponxM}TnbQe{1(|ly*&t(?nDvUt6Jl+GW7bIWG zS5LqHp-aU;!gPr7wpbvQsWsICZ4W7nDt_Y7#?SiMbgC9%*m)n{O!pd z-QE`drGn#r>3&^m$B(5S>*sS-?h1Aoi%8z~NG$`DO(v9oA7G37o+&w;l6J7O*9f_| zC);p7S*y$}I65}b6iC1qZFNFv7JBm@iw+qN(4JV+)0KaY(a<{B$&|2}R)+6%XQwGP zNfMMea7^%tG645?aW4Z}*mI_<2V{QewR;Cn6_e#n)cv}RK%rL(FlC7B~3vaq*Ivd$f-S83Tu6H8cQ^5OkIEHpeOHQHYd75)8q z=^S|N9Q!#+sJ2=Fc=r)k{xLf=9s^wQt=Flx)s)&3K;P>j#oP78;oXIiCUh%#d~lTO zP2+q2b<>aTTRhOpLKi<9QFsX!O*w%)jEsA`cJvKHP-A^!sEVX<02rvHp9D05Rs`9{x+u!f& ziptAqWTj$QJU#D2Ef`()R9LQ4#+{?Kqjv{~$jU?`#F`K0GD<_97y~fU>Wm?;8a*FO z)U4Gytfj9$LPviR{EKlg3O!draMuBbYuTzQm-)@Zny5j0$0xwWpo_-^SBW?b zNUH4F_9EvY3Dwl%a>@yjpKo|o6Hd4`9B*%fF_xgi``WBha@_WFLz_TF05PAE&G~MX z4d(QX>pR`g>eGbbO&#qz260Q2wNG8voAX1m%M=}_=DJ1#d_NgSqvIuuOLCpfhx;7m zF1zTjv4<^|KD&_fh|pDDdg)h{mOULk9$=dpg=;^>CJFZe&bolR=JJ-gbEcsOI<=UI z9Rpk*M_DXaj$~CGqbpjdB!^m?A1GB>`7U1H29v~PE!2YROItPvxAw<)ELy=#V56L_ zy!yMnTvA!@3ZKPclaJam!LGq>Mivmq8mk@F5%Vo^kEWo#tNwy|PoNA9JuS}{_%!BM zq9{7)>I|&j_%7M9eD=Ym;qJx&k^WeD$fb?-MQUC4YZVwp%gLV018f`*m#}gGtSQC&YOM= z#lSfqM5!g3r8j{j?+yUrU4u8jV8FJa4Cu}^(tb| zP&`d2Mq_}YU(*ci;gE4dL-pCWOSHdiep{OdgY__{|K{wXrd3aNZuHs4&Z&6)J)5q< zwcL8$UK}XQR+`D=uOr@%mj`FxnADa>{7g?CaMK>6wt@TqKv%Ig$wz}`r;~1x~?Iy%gfbbHflQ-NB z{zQdX@u$oN15H0i_I%LAt26lP-_94~Rq{GJb}wUM?6-M{ZH6P9B2C4CYw`mj!}Bhc zl5#OtJ)6Nc6KQRNlZ&)OQ1S_p(rbp`s+A+Y8V1RL%eAAHX>t^LPi%ma$7!~<)x57N 
zFVoLBF`U;ZnJJ+qy1%*Yc8W2Vuc-89q5U@EN@OS5_C6zkp7ty?bz-q?VcWf)0qnUu zPgEbc{!lzFiHT8-Bn^BX78!ghdm`Y~iPpz4t)aKstVk@W^U5DPy>ZUiLY*~*@Lxpu%fDxn!;8AL_JFub;AEV;!)>Mdz zrw-Fh>Oq&qxi!wgCP^v!rY|{=1h?;k)HU$TUutQ8ONYy~R`<(FGgv@Z!tNc#jUJtvzGDH$u4d-6SNuH(=Y0}M@q zwj0;ugYCNOZ#jxQMGGYIwWC5gtXeFWSqNAo?K3CVt+n-@V3r$qc)F@eF6IFfvp8^? z#Y%-f07_ zRs*!M6kM+VMl9a#htS6cm{{yF&`$t&@RqsbW{YlI_rM)~fQ^kYzvyim*dj!gI06F& zx#12Uo-@Lbw0IX2w&RP7!JezM1*!zNNG+qZZbk*_2@!EX7tm+x#mRoI^w`9tX6x}} zBBBr@_Op21=_3)xV082=$>u!p@e57irFiTcs36OuQA=Olu^S$li~ZE1yK1i;|Mi$O zV^B@|>Y9LF(DDmCHY9;XU|_3_dYUK?)ouf1ZSSX=m(d9zWo@{aIQH`AFI?ma6(a`B z6ov(KL$^m<*=g`8CMI*|j`!~3uiERx{~Ukrj{G~pZ?B99n>6$;(!1ZlXs zJ8`*EWeWN7XGC$kStAQM^L$c5b6(;_H7)i%@SKOQQ(iRObv0I9#t4hUdj(lRD<~CF zMP0Lz$)dY~)OFI*$ps;X$?`U%s8IQs+J^Gw_msho!^bn=X@dvy7_hIiZfiFdGS*zT zcheW_lk047-iwjSE@Nnb7=P8rW881K!Jznu#y5Bf$oFHN9QscaJ}DxmFbV~Rkb+>=n( zoRt+a*dh@jkymYw;aN`wU~)VnfXQC?$@DHljMZ?%YH*ZqS)Y*bMEI|_yO#m&8g3E5 z`S~_BN348KO5F`_qBaC>XbceOoMtHi@&RZGj`3lVTx+xjhmAP% zzL)mQj$VyYVJsL7h=F3{F=;y7hW6h5eVp7+fNaxrIV+E1=AUt#0mzYQ0P@*41{j~* znLsMrfH7W9L&r5`|dR3CC;H%34ws=BHcbgF+uH&s<$AyUs z`qjrVZ+tJUPAE7(J$>fow6{IW+zhS=_-^Z>z6YPR7UAjT<)GU+7~*6vHSmIU!{{Wn zeNwx3hB&xR9-dB=x0QXVlWuocbA_a926loU>H~>IXi|2GeT>^IeIgUB8cs&%&U`j? 
zbk~QbMg{L8&3kk*;M(jb1o9+SG^!D;#TW%S#w?6m}~}QtPdH+ zq9__jfw`a{PP{&yS#XSrV?Z3HYlw<6Q&9UOVqMyjTCFUSn~bHY_4JddudnR%1n4=8-3_2455G42CKf&lJt z*mxyCApQ_p3DNiN={s|1?(a<6Snjw7$}FbyWG#x}PrPyy>a?;^IuYbZL;gtpGT;xb zPdWU}Q|L9@egCU(Sa5?!x*mP6yLAp$oiI$~@_Z^P4JRnj#a`ssA4z5eo7%r=oHnms z+sI6Fp0D5kB}^=lO7_;5hBHTA_MpeM^mx3O?M`sI!`%5=_IuvROZWq2I?`IS#HIn zIbk0grwbMg2~6W@t<~%^ARz}Auf8ZFLLdi(%beiCk5cL1E*$CxQNhi(4B_mJ)6f2S zpu2@o6piSr^+9eCX=j`v7~bHCk00mWf{!~fDEI#yME&Hr^;TdM&wF|G5hDbG&D%V4 z)gU(??wgJTfh3!fO*+Jvn1WFsKguAF6SG5x*hi!t!sl#!Y9g_~C@+=^1P2Td$srA& z?xL|fW_{4jcs1DE!oTigfJNztJ}mX-vj97aZodn;B!VaDBZ{2AnPBKemYPD!f`;AI zk;A@w>(&n6u+Z30&bYjI9JOaSg;LG-5Xh`}dhn#daBQY9Zhce(szW3$yPVWO$FQWr zM$b<@$AM^BdNVBVClD0mkoHfZFl?&Qjj*$OTy}}l!F~M~E_lt`N5Wqt()~gjq{A#9 z-A2OTHB;6I0Te}n)YQ~6`6C|}o`Cf}{E*pO=COFk^2x3C#^##Uu?0sB==qM>gv)=}(y@x=QD3!z+v3tWT ziHL5{9295s62n4IkYeAvuR*&MOo$#qp4aRS$_jToZ!ZU0Saq>iubRHZ!3m3TcQ%c{ zm5J@y=_eh~At@Vq1d%`iYanH=zg`X<;M!_-VN}5Gmx+Cw{TXguz&IY~8U+!O(V7>z;7v+Fg$ZNeQ zp6^{#Q&XgjiDZm$|qyzyzu`{#da zQ9}sRtP-fln9{^m65<@A70jf*}H~;0%krIwmd4~u;|6C zN>{J<0vF8&E~^^w_En6G!116!#7k@)+hQX{JIEqc2i!sLCg)UsS7d^wdfqy>Vm1E{ z$LdI$(@3Vi&T^$guIpkg)ouoo(?mYQv2z~>{^j@85WAK-KDq&;`U)~zs-V$Tlbeau zC+|)t4yI$R$Flc)mkB&rB$r*A!$f{qf6%%Q6{HlkSCsn;D={sk@q^jP;iB%hDpA>6 zNTNCllQhrnStT5m;#(Z}J%)&MP%7yk4inA6ywXw*`Zt@LdC|$?q%Z3DxyR$#D(;;( z4xV?>q@|t|KwaPyLjy+Lr)^6Nxj#Is_m?Jlocn6w^*U*LW{81f4!%9?bb|3} z(zuU5#S(?vT`3Cp4>dGP;7PGPg5;n(r2eJQJ@0*8JtsTU;g)19Sh@Jek!U}i&rQMX zLOV%F^XS&B^RY#vM2)ZQe5(oX6M7FIjeHiZ=o8G;-9dG%`g|FQekBSGf)IHt;uH?5 zll)&2sNhYn7xZ4xE6FS2qw+G5+Lx{hpKT7jX_Pq0a$8s(CVYaOyJf}DFs-K01$R@* zxGo%mHd>u=-TN5ec;PCU^P#U7p1*-n?9Q>=#*vubt_Ew8Nteo%8$g- zlyY*`!+6i^nZ8o|n62UA-XHMk@4-x5xJ_%`rl{CKd~NoTI7APe@U z{f*It%JxtHKKhrRtU6x{%Z!(*uKv+ku3&i{4pWWoc%hRnYiOWnIWl}43^d7^1iR~e z-MwV0ZYS)(^Mt+lVA`r9!VY^Ni)wLM1C}5iQ!r6CZ_Pi#-_TsfQsf;0AGP(}mAQt# zu;rM^0L!4Ygyj9#P#^XiJe;@dW!4KtpYZF2F+osEl2IL z_pQ!@wZ*bzP`R(6xDq+?zNms`C-*YwD-7NdjL zb{RzP8}iHG@B~>>wjV;Mg*~Z80GMi7I#S+WGpt0XcL=Qc5Gip?mEfj^FG{Wqvi+|0L)l 
zG_~DzLx1{|u#A~~%SsV9dy{e{)GeJh=so<3aevQo<^|Obw_XNI#_0FjJJ`PQeTxt! z1J#W%YcQjan=s{S;j;-OwKT@Jy$fwl^DW$wClSZ_t`xB*UmiIgu1;a;*x;#T>teA! zF|2K_5TPk_86t$5-_{(9#m#p_jgyUyJkhkA=2!m~TR}g5vnN4FxjOnJw_7Nsks=cOdvxKVR{78CN5%{F%`(Eo5$;6<>G>f3L7+nTs6EW{qv@tE2lk=3?1E5viE?ynVB1($m1Rv$82@q zV;MRmGyXzeNnA)Y#KP?1>(DE0=Im!VvbQ?|GvCE2oLX=y7=E$S^QSQ5%DwF^kAW+H zJ-Q2S$U;*T%tXfV-s58Scw}49H;xgfyeDngqt^{uuR)QVe=^*@banORE%(P13Ym8V zNgPWO+nbGTL7FL|H-c=;;+Zm`uWOpCrw(s7G!wJq*MloGhpC<2j8zJHR$%5CO>P&)6H_-64t2p_k|(8ICu)f zLwic6h`(l;R*W-vDuU9u7LDA$GQ=*(Mc7|gzn|Vc>wfMEpNcr}n=UTg&%v`ywjX4j z$?$4E3GqUP#pYECeZZ|sj)sl8B}x6ZYw>lKlBQzg@19vHA&d7`b4->@HFX~RN;ZS- zp!@@gsD16~kN06thb^6YA|{QIjYWXJfY-4#_wiE~8e_D_Mq$f>>grG2qVvC!kE9XQ zcqj2rrGCrpU~-L$+G6ffOTVk&LYwDx*&AXKtg#^;ox6NHl(4r4-V-1<$eBb^i>NR< z<*+Vrn@!KZyUV}9hP7i5+|=r3RmV@BmR4U`@C6Rz8{zNqX4@uK8p#DQ+pjinwcK!E zt=-2loZ0aJ=sh~`%{`*tB4zq^!1vz1qCweQLP#9m-7t}E?%5GpvfSi7zgK3X4Cbj0 zoY26~K3J#eyT-9wbw%-@>^5t%U)}NZU1pzM887QjIIu|_8oSQ37W~u^)gy8*J)Q%m z$O6Z(SChYv&@U4{7a@do?`)6ph?2W+s+q}97Dl#4Gij{VdBUQ$k# z!s=5KDvnH`MuxNJMK4A*@%UVe({4_4IM-1n5=JXOi;o-hoL%TW2+R+baYaqucBplx zcy=(hXYR_VujZ}jzm_eUN?;A5(w1-e&T($FI>Nn``iA(2jlA1-MfJ>{<>5h7q?V`e zkG9e|Y2`<~exg06)WJgE#jCxqHGPEVpQZ6OImt7BcvdKpyzO8M)utsC_Lrybf~qxU zlLVwCwRg*Bkz6Jx;q-5N_nCWhNV;cPm&Z+a$4!~&n#0F<=88x!;DammH-|q%399>={ZbrRy6$XU+t>q|O9wBy+P*Uuzf*|&t>Mm$Ef%@?3#QR?RU z?!;X0ChTY58;2HQURx_`XSgoP3cQ{cs5JQ(gk`8+>aK>&D}&X^EXd3V);#M}a6|Y0 z8Jf}8>l`^Eo5R=Dt{FXmauxLAp|YUC^W!KvKvgYmG$E+nJvTKI7g z^;5~gY5D-mqo8|>otq!Ec9Va|+%skLzTaG>$VI#ClvKgYl68 zbko@K_Ah_myC_XxHF})z5<&TgUA8`b)ij5LWO&}G|1`!u_TGGAz)q=4-sgh#URdAxXU%$UK2UZ9Ey--*_T@7y;QD7lVrin<8F7h8^o4F)euA@7ekk8z z{37#eW(7C&Ctri`R`8<~%Lgp!x`X?LZ@sdnQA9kGHkCY(MPD=P_Q39Rzn7`S4~Pjz^SOg@by9 z{lcsxZj&Pm8#OYIQLFR+n8>C~$iDx|XYLrgQ>j+vWvNrhuQNwDOJF4yE>KCqP|TRs zv%H7@wqNVyeGg9RVc-oqUvaSP#qtlBm%|?VRp7j}Q8YK3(a6^#yp9C>a%9u_xxk5y zD_%5(iTZi#>lL`|)$%~;8G7qOZvHiHh#<~^Pee$atgS?Yqv6-N$@_0l_m&SDbzX9O z^{4#?qV>K`bExn;>fE*E&WI1&4bB6PUnbLG57JFEJPB%;LrD5*QFqcLJzIJzC-G86 
zN=iyh?A26MZf>q;>pc7gw-PP((_WW}h6)&p`KS7s-8VYah%27I@$m4>`jc^f;}~Ja zsXX%yDj>^9lV0KE5P%QE2hgcOS!p;Fgf0;vrx%wbs%@m~?YLFD#tqWSR;R65JLpz} zC*x!IkH@}(&dFs4&3|ll85bK+huIkMjY58)rniNpLAWPNc7G0RUP-Dei3HOuklGH5{sk|*sv$wj zpd0Ab#~HfO$0Eb9HT^(FX>6l8C~Y`*AN@3w>DoAi@rzc;OEnKFLT1cg z(zOOA@yfeffOZjVUjdFCaDgh(IjsKm<}Fpbe{EJT>9o-jMq}Hw+<_vj#yJz@webcr1Z7EUhxN%yowA=}y zQj(DXndE-+M~w`=p_(c+?(kdE(dHnrY>DNSuc_GI&MTOIzR%45X^#b=Yp_sun*14J zQQXqv9UlEELC&t_uSP>R8gE4~Xg_%YXd%UjhNo zBK-pg-vIFMe*&h@+K2l7c$EFJc4O{Q9%FFR=zjH@}=Az1m2{iClzkTreKSuojwStjG=GIQr-Itj1=j3 z9mI%@d#AqitXbBGJ8$*Mb&&o_?0bSu04jmP?_agxo#&(dm@7*TRox7cg20;>%3^wR7GO^ zU%p=)UTJ=AwId?x&e_jsn&N=mzz!UnwF8(5kt5v3gJ{+lAK={^4Pmf(4DbD6ldi?! zguC;z{Ifq0CpA~CMH5yH(vuK_>xBL)%)rY`8)@^Pf=!h3C)K;qamH^Ie>rQE`A_$C z%U=8)*b?GRNTkk3wZgL)#T*4Wy+_i#^~`i>*=P|$h4#f|{0Dp!TTQNol}-ShW9{(p z4bOY~{_q_SE2$_$+3Luad4(Gu^fFr}``POk-s!>vdG<;DGN@+4_nr-rNbv6Ubqre# zkCU$^TpmP0F|E2+6HztQ^*c?*i4mV4@B}b%XOil<%j*g#RgiV*Yon~uUl*p20u2)^a0oGHVOazx~EF@|2gUdhH}z zTCwsa3mpU^(sPk4-4!Oa3sD@}Ycf9D`@6}B?$z?!xae|!+q6$XDNk6SapPo$4tgzm zr;wV23C#YP(3uy977OO*oB1=*A({Tf3OP)Vp)VVRNvwecik>i#iOby?4S{-hX`cwN zK+8s6eEJCr*E!0%tidgKYB|YwKlj?!b$o0(r>oNM-Og^JIQUwp3b<#0&ezP2o`Y$< zy_n3@Q}f2SU6R@V*fa2sR>M_~7!@kxQG5iboQ$}-$^z^2R&KMN{F?qHaxg3KmUIxq z+o_c%fJS=<3OMqt>V1h8eUtllp$o2g4%X%BA-`^x3Cn=2HULQRiC(ozq9;DP{bPYw z$=nr9^@eHd@Q(du^d>OwU&}^N?p@`(sX-$}&$y{Q@1>vhEH5q+FV#GPU;fMhTGKwv z(%%}W1L|$n^*pOc);okGn7y)47tob6(bdFMY? 
z-#H-eHLl8;yaWyb?5%tg^>O0d3c9e0n?%&sUiEsGx4mu)UdK&UFA5^lD3iulOmUoTzui6BAjTe5D0ohz8SE6Ej_%6`Hb>;J zMi96vNGQx+vfg2J@EnWSCd!=fF$B^jnjTz9al0WNIUSI{PbN}?1X+C60e6=Ubgp*@ zy8H4L1M-t06y^{~nwO%E{rwT-0?~ob*8|yN-38$DkW7*EsCRfzEY|B$#()d;zl1qh zT2&_^!Oyil1#(=<*)41=-rNRwPylU8JW-y&_$*_Da3H$tZocoNqHcRVQ@mi|YLcGu55OqD)!vmRySVVyjrP}-+s4}aeT@Q_Zl zh;w}c5wC(mshi)mNGb*hkpzUqYzq_MIOpNsywnuoA`nMVPUbCI&VEwhIT%=^EBAfwU*S9+-xPw&_~uqgjN0ru2s z;?YF`h{7YSfG#(@kKqod`QmNdKRHAAI7QyOD^H+fEOUyWi=RA6J_Ag!;dAw8u8>8h zRj+@f@eoNfzr5)lr?nCH(y#9|tl|6eM!H5GU$gN~-Vu6d&Q47?!*Q-I!9ftP3)aCd zmLY4Qd9Ga}4()n}JfJpER5T80WomI=r=&&X>b|@ndk}Cr>+}mC`zH9}RKwE|GLRg2 z0k@Z{8xHp`F_GknfC1)J9pA*UmK)+%hilT(l4%xa5@~av#FCeabaKTFeB4axDS)}qD`$T_-HD1aXif23pgN$` z;L}rQe?8&xFp2^5QOp+KQoQytZ|s2$6C#@^MdTq{n~`NqvUsp6{ia&Z>HGs^vp1ZS z>6!v85e!mkxrWDjI+fOP#qRgw@`m{aKgHSZTS{$LdPdx^9){BY^0C#ow0haiEA6Dq zoUnsCeJbYgreDdf=z}N6^oz}pi-DUR@Ji!IqZ)pEvA;GiJU(`_k)sf>2<1q_%#3u! zX$#@#qtwfoa`%@fbdRX!x&0ZH3p7I9lgM96-1@GycqiyE#bJv9@(4@YOW#%i zuhmdY1SFpjY?Bgs&Onk6WIo245?BrwX7wK|-?M)O7y?<%B-;BU47KV7?a0>`-XT1s zMQ2Uqn6?^?O%iy&y%sbaRzMnHSK7dau=eT4dy^6n{&#M>`Nh1v_J!whj=UZ{uel^q zxvy@Lpd}!K5mnvou7JXcq4a(`jjjoWvi5U|+SHZ|IqB~J-+htc%$&b%O0RKoj-g-1 zXj1723Dye#m>6bvE^W0K`<~j*oP8j5lML?F&B~?@>go9XcV1|i#0kI3PhYRndHf4z zpvRr|^=}3$k%kXR=1<={Mzo(El&|zhDDT_oB_^qSixsXdaz8~5$wb%J*Ni(7C%lkfcZ z2yX)MGt!0Ijjr+CGCsaN?bPy^u+L4D+jdaP^9uqmboBa@v^(dfw5ecO3ILy^WQ|qq zQ>qeh^1&u8n9Z$;sO0#tv#dRP$esG_`nqB{Jj6WL?tcF;aV#DZD=H%L|Zqf zSYJ={Ivp^U1$44N1S}KrYbC2E+Y1J)eG+1daygw#BN*CK-9B|q?&4$CVxhJ@a_7nm z@+bzj_+WxdS#xsDYv#W|>0d}fT%R>%@j57GP~J2ey1jjzw3ia6`z==b;yCb!Jwc(x zE>=o@fV-^&>g{(9(Nydspq~&w*AgvSkGiJkM9Gy_@KPdR&MxG>F46|H%O{-gQp^8h zXiix3dl#9pN>g6%{Gzc;F$U9^&Qd@;CKT4xN-cNy))T@jg#SEX2pMEwup(~UXBjFJ zp3dgdT!+@$0VO6`{c&#p5Umnhy**#{@cvemC5;+1?z*(yzLppDABbUJspk zE>nTix(EL;08msp*t43mtvL@#L2X@ceNE=Fo4S)UvU!);T!yg3#J$4ykG_3YbjRf? 
zIAUim@F^Bc#l%7R^$};usY#NRj_R>az%hHMZ;(u1@6Gtu&}+9|&!<^TULR!A^ZopA z-Ye^g>|NJ%5%pWa)E}C^Z~{H^Yo8r7k0qp}-d+$gRU&GIOu7lyfko)YjTm*sed|~rS zl2XhoIf_;(=Wt3NmR%l0zKT&<+~|T`;o0{^va&B~Ci&6V5fWNdb1gO0b@S<}w4}hYJv-%FZtHxrL|TtxTpEn+ z?_96LYLuf3j)wP4Jc+QyID5kqh{x)<&W5GU(QDQ$2o(O@^n zv zws;DA+A*TZ;fT-PbiXhf?@_U4x$X54l-rS|Sy?DY#o%mW7fgfcy~(nGva zj1ST=oh>hG!U|YB4Jnj13iaCBrnnb;bgvN9=3_NE1Pse;d$t^oG4dutk2F5+FX?tG zmq8Vo;((9*(T>>M^wU|7O>k^`V||!_@*R3vZ`DP{t-`y0;ZztK+!WTng^NL&XC|aq z);ls{_kFZz?^XW^PxWaxpTKu6vUYc^xHV91*%+)Vc;s{kOcp9wRkcjawq{$_xebKP zxm=#iuaoO0LC{2GZtnlhJ4C>_83I#brG z5Q)iE=?n`87jZk?U0FqqHr&V7W9ySyVi31y3VW;67uyw-YhmERci-wJbJ;$DL`!_R zJJftG-{onjeWggoFU-%%Y4K}0awugJPf;3^x^nMlV}uYtuAmapT!I4QiN?&YT(ZSs zk!Z!LRWK!8!J zOLcv={>r#!KPc+piJ?o_ddL$FI0E~x`KJ{_Z?AT8PN*h#AEjO^g7BOVe`<=}iM4Mp z&k*XHnSbcuwE%pBWlGQ`nTjwm387g!>2oa*dpv^8xN%#PJh_?t)q%>%M=|3I7(hsH zGcu)*lP(~sx*CehYuM#l9tUg*83sGC9l8SyS<8cb-Ex+APm1%!l*?COYh9PLC^$IV#sH#zi5_(!8T8mki}=1xDpG(gN^8LB|Tv>tjXPP$ci(``epvMRctoz#N1)42=4qjxw@-e z5nGi0qU&;u)+P{$~VfvV<8_nh;isU+|0gpYfP9$lg;EB#@+TIF-@Qa;xB-oy7(&>D{5mmd9~e`I=FLoDd(ko#ZeV zL16@$!(T%vzip;gyEjVV_(E3;$n!`TU87ApTYP5c>kWpj73G#ejLIN=E{7XoJ5ofO zD)ybEYMz7cx~>J?lW&vZQB)c$aQxmNc!5XvyzC0L(~C@E6Mf_8RhCsyub1iM;eU8* zh_hvtjLGOv|q${uS2;CBc;j zJ=4GYwN3Ngi2JSTNZ)9Xl2M7*{5tuQU48O_84nHkLU7wuT?MW!kl22zSFi|g9jJK0 zS3*+t-2`{quhw*CNjOtzqjG8s3$qpU6`d96MXf1Kuw^Wb3zLOr1>~5OM5Gv4toM)5 z%-5A^%Frneci8RsW&n>LKq?&#M^9bt05bqp_r}JRRsEZ*ouHdt_1zQVk6Q};Bhme+ zr)^{cXSrOOmi2EyIl_oZh;c^l4XXy>1*ybr>ge5?vkDq9)WneTR@s-90|9c63OaD{ z$wxMBpkaU2ECmTD4H6?yd<7pn?}dZw>f6@6ot>S9jzu_& z`If?rz{<^url^Fwd~xe3k-ENeTYX`11XX5PB2EZwV;ft0(1VF(<=6${-_Ut}2)9`{ zn`dY60SU1Fjq&Sh=0E_-{;39-c_2>r@R7#mzZ@aYoK$}-Xqz5RRQD=>uz z0KH$>=Hd!IS26$6dHrL*?pf!=uB=w-GOat zc`1R1bQq)y3AIS?gqICWqYOY$jpf97UN$)$P>obeJl(!0Zp`S8K^e<_^i#OJ&DneZ z9Lb+mvK&}~uRSPA9f&D{DX$O|yg<7(8@g38M8T_)jRg%tkrO{6OSXiL&GgWRy-4z% z1)xnB5RpF|_`}QyoC zX{+B^VbQAegKD&D8wF)d1M7a>?1R>ehTHuJKSx&>m4tt+n43QfIYOtgRfgdXGAakX 
z4MFlh0H|5}aTTPXF=qQO1RJw_UHqjIVh3wY?4ts~4h#S1!A}$*1%d*=MEJr>0Q!wUr*~= zzJ%3fXIMCf^$fuR5T=kIzd>_dn{R2HP@UDrv}CmUdIdw@RF&o3O2TCg^5?T4@te?f zAV&%XcfIkGe1~k(y9H+MY78O0-k1CD%L)YWP>Kj4$Z z7`tzx9*{*x7atqRyj+)h2V)So$`rMLKyzNj2_e^U>i+s7UX5rEAkRuKf9xtSiiZi1 zA%NO+RpKwfM!<<22kd_JrTaup^ni7L=fSKQ(=e&kBBN5sEY89Mf>&x|4i&LOc5oqm ztn~Vs`bNPTecbraKw4@OM+Zmlz~O8g0Ur^I?HE_OH&nA3w1CO%%$hq{^SLKVnO{BO z)~cAjdZ5Jkl1Kjm6)Txa=;7zBmDh%IbcH}LukV- zP1v&!7T(+Nxu1P=r4S=*&*SN%LZejj(kaINhT=oQ8$zr>@w+tO$x$oXQnPGQEovPr zOw~n+c*AXjwzaZ+>3!V142ypu!GNB6W;h`aQ4J zSRja4!(6N=V0d45f+cB2HwoI4Yg_N+9FM(B?7}ZKcM|g1@8S!#g2U_Yb+g9nJAiBcWk7+Mq;^Mc_h};hMc5O6F@CWfrm!(`>~4HCoAWg zGhr-ChbxVERR^#nrz!^8ol9#XFRPt4nYcRh!if8Qe*PLK_DstfUivrtk~V3Iub=Jv znfKyj2}zmhjTZ!(;pDXgEf-ty5bdhE6Kn4uJpbg}vspxaJ-F%E9Qmdj5UM*r?c?T0 zZCTGTHLs+1#9cQ+osK9-QLc+-%$?4nCx@`Kg{=jnDRu+ajY6PVk@}h0qD=zgjrZ9! zaY*n=lU6gZMLb&Ap#tK(7WwIWiGy>~&Ii^P=g7`s9=TQxN{6Dk*oDoJRN85*C!LST z$`yc~E}9~#?#q^_`QNzd67vdMlR7jNEN*)7Rr)q#A$uYHo<-cfmX`v1OiaAeDCk;bJZKo^`!@JM=NRQu9^AjdCmn z1$C&D+x4P-k92z1W>8VguI+ZN{upRu0&$8;P^a^x2o7t?d&>iQ(RVmhCRH6~|+P`Z4vZw{T$-$j^SPrZLdp_=`c5&z5DCsdQ1$JvWV zUY}7B41P~yA^aM%VYz?H_73jYtg9Z^x09R8Ibl7ySHr7&Hq21z?Y;xI1RbVmX_zX2 zJ^=F=tW|p#_pZuh>;~|4&q*qUnt({N*V5VFb^6`xM=`5AUE0DZ7DX1_BSEPw4I8yT*G z!>tIQqXSy_S5w1kv6oLNS&`SaUNE_SrQPL_j>&v{?0*0@aprPZJZJt5NI2ngNbeeP z>FohI5E%h^i-?9~6Q+c}G28BHR>|ivGWp`4)T5?CjQw%YmzS)_pP8veoVs(Wq)q^1 z^-MAa1mMh>iBH=Dp0rL5c?H+z5Vi@YLoB@_jK9j--`^ zjv`ZFkw`jHC?EL>j1{9rxbAE#JV(Kn@CZFzmA;^sQ*Qtx;2VV!qC!BJlm~npWeD~* zZho?elp?4!E3Nx-(68y(eb5SHOWpPMwvH;!Rzdq%Dh<)9uKSh11n$SD?$U69w;Av61AD*5i(M*0kNMppoC_TF1>Ubiz<_WXf_CeIii&N?;S&YRhwkJ~h= z1&|V}9BBk#$qJt1WjAVSWsxUGJ=zzhCky1HT)j;s6O*x7QpkdyMpb|oinMD*3t82s zgF;TO{86JsHpC()C;%t>=xd|R~TD!}!PuAO?5W{XhB$zVqhWEDiu#zcG$iZMnF zNpJzjQ54QfEb?XT@?hjQ$aA1p08M1LVtV`|cfSMd23$9;2c~6LoI`#IxGo3v-$}nv zTKDtO0%_lb2nZ5J=bf)G6UEJWuER&ZVu7?K%*(SqQnmlMDtH$&q|e4lS2Hfpt!H^d zmos3qSh9LImw9>mqr^$2a%&7@0_@CQrREb-q;mtsY^NGF-SLYTcv zX&vB{F{7o!6>HmU>mh3$PqP81NqcdJyHeR3C5$mGY$lG-AE@?^Ln7}@9lPnus~!QZ 
zz6^174tahMysMNuU{J%C+*#i$`0R`$nC_=R%o*n8JHosNPDs$HO8BB+W1|_GSry2R z_9SsA-S`pvG=k|}H7&~uXyFNp5py{*aykapBxl(dOkr|jF9p#4JDtD3XE&0T?CRuW z^SPTrXqe8~h2++yRl#n!>G|*fV4uF5#D=($F3f*NJ8P}<@=>t!+AAp^EfV8r3CMuO zXy+#_ObWe0hc6rSeuf9qooK_P~SvbR}n5X(01i1b7!1HGV2F);Z&XJ62NkCB~d|p6rjCiWlNb_ynQ5^&9Q?FnvVsnTopY> z){3&ImE2Fy9QPvu`_`|Y_@O^?B3S36pU-0USYY5$T56Sz6#+rfD#mWMaB_+^=%g;~ z1vsj=^O?ZF+Fi{tLWYVDXdD7bc-KbWhKxQsk*w<6kJG8vfzPLY8TaFDzptR6{6?Wn zAXcMJEI>tGcE<1cd-@2EZ8{zFhmj$6zs6PZj6KoHzv1RGd9)vf^gY>1Bt&O14B-ZU z?0w>z$@9`EURN1ZYc2JhY_#)TKj) zMVepVI`_VMW@J+LDk$BwPb>)Xaqk24aEyHSZz@9XR9HTWuD&)Ko$lc_;=*@4ni4tA zsi!G#MXm)}v>xgsgW- zuk){G^k(;so_AnB;H$886CAN7#3tgv z1E56djdh;`#i8cH!PXznN}$7TvfTYfmhvN5G)k^>%fMBMx`-kMV1FmTRSTfzeLEnIW^po@3paSau zP`{hc0c=08a(qPBM2tcSAPx(Qrey&9qf+*JoO&3j41FM6PzML}W%DMw0m0@q8+B># zbD+<&!3kKu^R;I?!?$xb8(H%+{X5i?PFpx1^#o_vzcTgAFMb^QN!}*3QHv$|A^Z#W z(SM=)pp#b-8id$6`FsOVtm}CU)`P=jaWvYL@91Rj?RF!(hp>d+aA#$TDmoeh5D@?f z^>>GmVp1>|z`tJMp}{>sm(_NXdVIf(N5isKt-|jw*KOc!D8d2XzN zvJV_-V*#T+O`-lOW}qf5?#PVwDl?K@YNdyRIOTx%ao4}J$W*fKRLg)xroRtJT+U3F z=lz1}Lr)b^JiYaNfq5vS`>aiEErWIdxL~Rt+ll2zKV3XYvGrDhkO&`x;y7v+?o0#1 zb#hz!(LCyrmcai`+zB)axihCbX#qs~{D~*QX3`~C+tgDcJiNSu9#C=-PITZ*JA)%jFZp6ZO)DXYD-QHx4r?<)g&T0N_5#E<1OFP;hr5 zTp|xhhX>L)>;BKw#7$1sV$|<`tTuQftF@1P_b{m7%Vm|EzcdM|hEzSP0!6Ud*&EnU z+|<2OgnY91HHHy6LNj~)>0X-onB^A0O1R*WzO)QpEC-i2%eZ{6BOW>ycNT1oBlFy$ z^&&J`=ACb)h9(Vp($9U93K&2v;hMoD-y$*|>1X%PEzqi)#DHzMw67l3Jcg;YlYZ{#H!^?TYkc_@+Ga9XVVBar zBnrGG2!e!NTD*YKxF48w`|j6qw=Y>j15K^)i7pL(44hG+=WJ&#U zS2z@5;8jwdzd((Lw!KbxA#AwH9DJ{-E2fP`sn8Z#^gI-HS)8P;wWIm{y6eN%A=%ah zhNj>Ve1FE*`H=s5YK7n=T!ltXV18!APU~jScYPMd{H|7^s>yhE#cJ}{1pZml3h^WX z?&c#Q!?WQG02Ux@lXJb1ThI&h;`5hQ6aS5x`~~N0uSjPOz?nAh3cw1v@6vr`b9X)) zY=hIu=gEw*K@2`~^P3-~^QF|lPOJc^O&1c15p%^%NZa9UOj1{l%FNHdPsX?B3UJQ4 z8`hf`r-u&bw=Q@5QeKC3TD)#tQJ($Q$p%MuS)7$TyMrf)L!+(Dkb{3;Ym2|nh&%fS z(Ukt4#P%4Vv3~IJ5v_8%S-|qU7OHGawkmO3n=JfM+K#LdQ@D4^i3o5_HevAgE4*1; zH3aOXW`LZnLxV4Xyg#C0Td>7QuM4J0rfhocYycJ}ZqEiR8MJbMLmOnU`v?3XF; 
z+=_qZ&edhF+(>BWnCLya!`vbJ$)W?U3g}?PJXy2)A{X!=jF}k1c=hJla+nsCw%Px_ z(%X+jz1ho5(=N7LpwxOS)gm*?&v{6w+&FU$%Gh7T{A5==+)q%D=3|$D+ z2EqOLT#aA!sVwA8#X+tdT<2bln>@&QprRT4Hq&UL;2oCoN8}llzy$#;gbEY<1&dO- z2gLmCyt%KkpP3y-Zp?g`_C}eul!-Er)3vjbI>UYxQhxU(*%y-+)`pWNc7c@J%mZfk zyRWqMVzZ0pd<|`;Ho@hZrbZ_hiK?t?X{{jRQq@FHD2&GGR-@ zm>W>@d-Ab`hlnJ}reFFt3BrEI=p;=4k;|Bs5W?Rmf*})3yt#9^9^c33Iea$k(oV{` zAKKV2k?@Gn7FJ zut;L4152N6ESlhyz;r2-)+Avr*%n3 zK0*AZgCECZGK-sYeD3SE_DyW!C?jV{E)Io;*-2`yae4;zF9^Li2@Uc6uUZaBIqZ*X zNgDL3hIyNLD>XA-j?)tZnklY3!~Lc{hsP0xt)Xl*$@N-=G=tPqO1C2(GSTAQ2JU4i z636&xgIN=9kIXIq<;w3iN<46U4t?YE)Sh74oSuH}fJW5Zfi38}?qf+euM{rT*~ByQzM4Y()~r8!)mcpMElkls4#e_bcZxUEx%9)ey2 z{%qTmscb@*0Fx^~54u-W4S=EPm%hFTU2Z^csc;{Eqzd;ir|qA&tL>#%x4+*J>P_7aR#8sRa*xg)IS1!c zR_v`}Cin5%tj_JZ0R@KZS>y@&*6mZ@39O>EkHVn11lp*6wGnP$zijXGS`Pp9^NVf$ z!n$Rie#Fs@0>c7xVL^&*zL588J2;GXM_144pDnW=VBJe?9gGab(T0?rbovi)XLvn~fBJSCK(|AUbGi1T9KA3dxd@ zey}5n4r981H=Cv|;^%ZGfo+cA@A9i8Jyrqj2I!puNjK=7*$zdj>C4$2!~KV>umC;u zaEZN)wWG8CM{syec1aK}huK~Ol+h94qdseU&6qdSwG3m^|L;vh-iq$IdOOT_xD% zScR`dvr7hdK`M8xw{)p$SayuRw-;K21A?b{uS%XQ?(p; zdfES5m7B|!H>+oZM8p)@59(^PX8F~m0m zn}z#T=BbF1iG6H?8FbdUe%TGWTi>oiaUj%IGcV|Bm*39^ESaTZn1ys72Zpmb1TUc* z2hMGXy#+dFQ0xPWX706nIxtjbG)?}1io}1Zt6cg2(M7n?`T|h{ZgZAHD3m=8P)p$4 z>#(7}i~M<95tAB^lbnK)dW)A^P(EcB4LNwOaVaEaozXKd_aQL(-{$gM`vK46FI}lQ zPihu*%K+MU51>&nyF@mb_c3gyWeDR8W<*H5e@nYJ zKbT}rm1XQEWGflAaW~Zb?kmJhh>{GgVA^BP)-(-`{D)sQr=Gdv*UWnil&m%E>Mv$) z9x7yKC~rQ995!|E7>K@H7hJDb>eY`L%;Kkzo1@dmqpFw8b_@g;(K%1K0TkO*Z+UZ* zK#OsL)e;iSTEF?{2K%J>vD+%O=w^Nfpo23!_%kCS=MyeuGLCNHtG|N; zogCVqZEnd1kIS-gVk4b`JE|t-j=9*9KsTDY6TNoP*Tyq)XGvwq>3v>$a^P;} zoB-b4@}SdUzm>nWTK;e{pYwd`L!r-8{n)l+OE&p9qNFGEnzOUhLQPZ6ptvT7NjDI) z_A5wR#Kq@}i2>$+!|-j%p7+g;EpwI@684!F>-)DertderoJ_esT!PM`ChxTIf5?_m zkTU@p1|;yy7JKRO%LhAKQukomx+pca2C#F0$r^E;F0W?hREnRpZF^-*nfHQ$*_s*K zdxv}s@3>(bl4jLPu^-o_uSktWCgVWBB1g<$f47_}FSwZZX>|t&+hfiPun}OYg!9Ga zpP0pETxoqVHF?H?0}mj|C0dNZ*yKSE#~ey4pspka#D7;!_Eq)(Oz9Fj@iqtBn;5}R 
zSV%o6N#0gpU^m~!@DNZnHV-P@Xny3(ay(T=*Y)Q~y(~F-0Dn3cmdhna`Vw$o8GWW{ zjjnop<^MEM-=xGdi+;>lp3nRkr?8VFRD-bwv#EP|{1oExMd7!vH6T*K-JOLQEkgLet=p3ru+ViPwi6q;r2(Nh$FqzLy)4)Mav$4Yz|sev+;4rfn8mSrxsbt z&m~kN%Bl&$UoTM;@3nm>I``D{ktdYiX|GQyF&mg@@v-D&rf%TM$_7t7!jFeb_X6{l zP3GJH@&xP#y03duK*4Rk0cMrjso!^U*-|M{Qb{QlQuHTzQXikgK{DmRVaTT@_b4aj z45UFZau#r~!doR!H710iRIrq$VNJ+KZK(f~PVvliEa;Vd%5sf}QN!s^&L@u?|HIAF zlQ217j?-yuQRTywd;3@R=gI?chTP}02}QFu1x*IG)BJIyQmvV+q>QhcLIz)y8dR=( z-VHoE60`=FgbvnEl$py)Q)?`&wd<@d1AcM!V)p}U;ijH`IJnQWXX#U2P-M90FPAC5 zLpinAbCRv*_|j7}ES7jJ^8-Fc?n}t7cxs~%`u9M4%3FP49C;@fiFlvC|G;aqCCs<%Q~JkZ6CAWhW7GJ|G7#Q)A6 zoHF@03Mf)65}!e58_9$vRL8CyC~NDomsp$(w0gTTNhLT0epS+EVuq z2AEs#L=WufGA9}!MG2AAG?!7md%mSJMDopHznU|bFV~^arj8tD3eRwq>-iBv@*Oy+Qj_aLu6_p0!bI)Hh4+gFd?;HH zt17W3(I3h#Whhzo4za;uh!k+{VaJA{-W_JrwtKEa(&6{3s-nTLI&ZXa^h&2Rpq92slG+3MIbQb|vsdKBBwo>$iYz9V4-lgA7sX}r|i<1_fM^0>ndFYYF4`C;Tj#YjxiC&{ymqEJXh>PTyZ># z;vVzCbL#x0W3b}7nYkr z(#XM0H)0qV64Cd91R4KWj2R$)$g1*%#!l?7y}=t;o0O0_%Hy8WrFpg)-ProMplL<} z$t_pqN$~Y2Qde)+8$Jj<=rCt~gNQy!GMg9PNY=u_Ii?CjnR2Gs1>L)d7{ML!|~Lk&GBU0V)tNv}cRn#gIE&0mR@a3$G$R<^hXgi>$b2*=F+)*}q4DD*AFXLn`<3{4C`I^32 zK^$py!$`l>(#N#zX~&X(Vac%cRL{kHqVZE*4GA+jzqF&_zB&^2>JzuWN{fTwK1m*fG@VBUD~L zJ-JwDwJKeCBTSsM%aqOl%Ar;{IGx!Y7isyxio zjil@{C~fcU7-Tbl)$y{^7gWts}ev6DDkX}84Zxlo>PmcqTY&{R$d zY~k?gyVq0S@K5o;E5Pl3KA{*1q$-nvOMc#47Dn3JUmtvFC_p~ZlnaYN`WQEXV35?> zYuS!$rkB~aeCExaCZcVA_suN%nlI+8A&6V$Vhj;#_%!|!#Ma~S!9`C4)M|SE);O>mU=&^A?gOd3B^IpD6OZeknQSx)Eqr%W zlJc9~hbz-KZnn!lwpg$s+7%35TuF8ye64Vx*DSPnmXPOs)-4&7e7k`i^5ICUoij5= zqGq`a`h^fm<>AN67F9t9d^oEg>fE=Ix1A2%!`Qy1GHx-z)NYnJ7YSV4b5WR*(=#H2>+1?vfh;z$s983!d)^&M}3D?HUuX z1$BuLV5!LkC0>4Si-&cW-l<}#p2gEVO=}+VMn{n7vi%;uLNr^n!LgXG!ESjmcm1b~ zq{a~!gr^xgr7<&p@#Ju@QdfX1jWFMqR6NnTQobQcKD~!)$g%dmmcG;rbGFWRnEEjo zW~>bd3JxXylZzm-YtLRtMKXB87Uc{LR%D42dNyCL6JEwAtyZET3y@u^uz#|6>-AM$ zzC2EU4oZfiy?*Lhk@5XPvirqD>Dfa4UstYXv%ZEF8zQ^Af7?0coW)XeSeebvT=By) zFRE;`mQ;t8g8u;H-`Oj8y640m+HbKOQSN6Ju|BX)`=lHzem*Trfk?l05h}R2h%a}x 
zRJZxK1*8;#M_yYGEt2nJrHhY_yB&l+SYg~v55ONj+2ru~;3uipp0tye24Q*~NB?#* zvR_r%O$+0B*@!)8;pkjAyYBmXdrB85zG^?)^*6ZH0^N}RpeBpp=$U=Ot?UKWrA93% zQq)~yLu=g538%&4gYQ6fm!}%8%bS4+uAS=g#TECPU)Cf3cudt~XT#24V4In^FRIJc zy!v2=lkf_XaW>kvDK9C??4O(0xCC;G5N4v|=M4+KSTl>Iff3?q)=+h54g-{GFMnqj z$2Zu|-7#(lOsOp4?pT>#Fy*j>BrW-P+)(XeNc>a=+wY&mF*mlL(^*5R8{O{$u^gam z7UAUyWR0j&ONt#G-574ZSSdw`+WdY7o~tUYrpMc}ZTqxsG!S9Dcc-E+=<^X;mVy+o z4ZE)o2GQQNBe3Pd6#W+3Yro%PnkQ^9#?53E`>po!A(!%uXo!s87P!5Ie(OF$ZD4-A z$GV^6DM>l5$_D!H1D|rgeG=noaP&9TJ5EWK!Rx|BrP%ytf*+-c*&hCFSmvW?(u-R& zI**HGy+BKzm(^t7E}3eYWNP)w(qMlH1b7nAlo%iPu<>u3olh%Xpd$O zv{U0>!Vq4%J1DlB?xAoS2-DHY7u~wAsK0Uk5wp|&b6^cY{(CaQLYwyt9HDpDgVNOJ z)T9IBy&62k-6KYhxcFb1WwY-L=#|fmr2Y+GK^Y4BmG>^R&2&jXF=?EcG<2muEsvRQ z)%|78NpIjyw@c7PH0Z0WY-O@x7E}Hu^iN`*m6}tkGXHm<__a@k1~6Dbd77A!-VocW z7txW>O7YpNCy=V>L=wtPgg>-H7#mDspZ1k~+g9@bn_EohzJG7cMK`%`=^@)Wo>j0l zF2}9vY6%7TtaoSzzjgTmU+yF1sg&yzttY?vL{*{_c>d!-TD1oAj+AOw@j4h$~#V-*Cgh#Gp#4 zZ+`6UP~>gVj&a(&d5bG~jfel9q1@7W?w<|zcJ7+;uk@tl!(ZvjmZPrs`4-bEvhBx5DOvSQEYF zd4R1$pl$vxa01(**eikpchIV}>Bzo5GkpSn>D|zCQOCy2a4YQ4Dp)&@+tFZ}pWDsV zov!-R5gK-NolRu)oyt~YVN{xx;R2Q_7*qX)?Vs?Nd)HO)1-6={`j~BDiecUKALWq2 z;=dtN=aRf9!JBUlT?k5DB(O_(VpvKys6TfL1#^oBCpIwU!RCFlvm%>KTXC+zBdf!Z zoGuD3a_6u*>pSRg{iiq}Jn{U;h2Mq6+)euns$fB>QsHWsIQy<@(?Sp&9ME6i2THLB_M#f@QVaUlf^y1PRiy} z3r%n|wA8>TZI7l9aE z8q5o(u@$vy%nNR1CzHS-Zg-EjY2|XoWP4UAE@eh;p7ZO8I0Uv2r4hQR3;+EV)ETgY zH2zsI%Ov)gU$1jFJ)(^!mfX_j#d-YE+KwQ#g?;`!F_VVA$+2q?$H?tqAsA4)C| zsK$tC#(y58iee)a4S^EbJwfvkeDH@~;3x4Pee(;gtN$$ibnU@wNMe|o?$x+CETB8` zN_zi3uV;)y{@~7{tWC?}j{viJ<2$N$dkVh!`WTt}w92h-yme!Tv595Y0cO%5FqmVy zom**ni`WOYxzul?gd7!;?${IOrWA9wtuwy;6+-YFb=_}czmy`P5$QrTG@nUe_wT_g zgbrF(aHxTT&=ZgMLRtQrShX+?m=#53P!7y_e+?n}$bI1M! 
z{7umI?xdMV)A^M8q1LV^?Px|bNk87vrqTHi>RziVsK*0oY=2dU@GXE9nHi{(d`_h% z;ZLCpF0~GOr(Hk=t3}?i2mh`$YdmUql2b|3K}P6pGnahPKic6!bq*-(4*&uzIt~R5 z4zxKb8RWG3v@~7)T6|-W|M^A%_1imS4EFbvw8`@ExjkF0!PU!Ue=EPaDXPECT@weD z@Dp&CSwBBqXTB$Qy+P|LcM4>l)IqX`VfAS6Z6x;u`wbSR)=MW~oYS0Wis**xnO%AN zAP0hSm7>$;?^~-$eXTW+7U#32cdyir5%CCRFyhk&yN~qQ)7*okXg|OHcnUL9dVu{^IcfP#3i*$jaTJWY7$n?5nD*ry zV525Aw~(aN@iD_s^S;L(EzAs=kDfX<^%_t4cdRuUxR*vf1oAp|;>vIqbf#n2u&Yirem&5-TI656#wP5zu5*y#?h^Z`VVw z8x{gu$mKk^v*1t$^@u}v=f|BY$+(!CwmP2~841Iz_#p+V0Qsqz&so|nBBR5oKki@d z`v2@uFYy{;-P%B)OF7jU9KYZhw|#M0Z9buN*M@XxvVJ9^n9PU6(#N4Pe+Sg28pLA}n6MPnoA$%%z(48+*NnIPF* zW7IpaI;>?5ksScXrIx;0)Z5Arc>K&{eXYKGgoA_N6c_+^gt(3FH8x`Dd(vP@3Cn?t zmiP1UG$=${#)pluf0A!A8S5RpTIc_U zos(q_AP8*u!`?c&mgo?AnZeHc4S7#@=L*|ci)$FmW* zc@wKVsia2|>7hl_#yDOXZ>C57tpqk6Gx6gX3`!z&P_}5^N+J=-zm-IOZK6PNDt8;P zn25L~Lty*2>yrPH2wyG~FV2fJzja5r6a{cy-h6EGP}^<%gz2x+C&vk60ig72zwwWtf=EkK=Sw@ zZfxn!cESbn`^M5cJvsWsi7O5H9?*q=W|$cvugjzGQbVfH9qmSraMjhmXOpumi3^uP zo1HZ=KV*rt$zkYDHx18h}!+;oE()0A>gVULlKe_cm*orx;`}_ zROd!_N5wHBbM_tdt-YRDuw0?HJ*t9G>?XxZ-GZF^B)QNq`4ixqnMZiAO^t&89!nzm znt$9C><5%m=~L>DyVoaGqpVs_1^Zy4oQc}hLGthRLn|R+;)DDb*wOc-2TNL(=4R2a zz(<7K*G0rNFue|)H-Q{3N)(_$8|6}TzQa2FYj^nRkpIO0*rQn+N|$sG(@AP{bKi0b zN(%RAo3JseN{(!3f}(g(ysnv^*y!?>xk!#Oay>c=lTeq>;_c^-1DOsVW3lt`%FTW* z^{fG_kazyjeWa$sK11x=H!*=ihvf1U{*V@TO>?niu6HrNZ{AaXx(AJ=M%(vZ0Bfw& zyAtZ|3|FY%G`Uku(s+_>8RUH~8b>WemOgV`)md?Wd?JyE?$uj;LyXJXw=okvCHeeb zhp%1qt8}@Z%DubW0qkwig z4bDhp$jNi)$Jxs{*>w*Qdls(p0*}~?T_@!z%FaJabK|_;tl%{RPWq>D7TxhtvvNuK z; z6>h0tW$xOh(WE~zRY(TyPr52ZfPjX@jH8^e|y!Gu$b|`qLB65=X-**KmL-G zUx&i8aUV-%(eDp0j&M-vHqeVVMTn`ypcE@#CKRd67d{gxiKYoZl=%}M>>md8 zxl$hL?QE9fe4drB;9PR`()|#>Ky78j&mpZZ?XaUIy5eXmCLv?Un|?B$Z8YV>8ZmiJ z+e0t_bA;yH(hh|r z4ZZ#>B9!z^OZFzHWL+YfPvG$S%bZS3uI=abp_Fa?Xen6I1f$R6&1++Sv}@3>;MmXk z6DQfRy1E=$@*stCzAV}nYPTdU_c6jqS@8R_5}cu{yC?9+BVj&|>1j*-BE@~1?83|_ zmhU;_X6fAI`W(J|xC6Wt!OVd|9^GFw>dTGlcDWx*wiq8{@*UG&7P-%-r8l}Ng)w6+ zVVPLO&zDtVl5O)E{d7>Q!d6& 
zq!S|eoQAJzLc3893G(R}i^=I=?6CF8fRJ!Jsql0)|TP?Nt!)=%`K%_LJtk__wy zb};oZTFqLPY9vNt0dIk}7z%cq@Vl47KUc)^L6(A${1qAYN)oqbz2uU{L)eV|ne?&zMgG$A+i9WZCQ&XDzTz_S_2}P? z>tO{+U11;dBy?3hyj?eRMp$1;38odHr6G2ieTa|H9&kBZ{HgMu^PNv~H7hGsQLLT} z#(H^woI$(u^II?KFPpMnI%Y&ob`_jDW@|SKhYLssFeCqN>UTuli;L{FNE%&bxIxD? zKtUy#@&anXe`h^8$56cN$-Pp-KjG2RMBP_+9~~B0$54#pdkUa)Yw<|RAMU<#i7o%+ z3Bs@K1(P`5R1K&D0lWGpXAH9 zr1w>~Y~1%`G_tbj{hMKRalIXE0ghUuQlz9(cbjX$p{1ipRWPUz!%BUG=I{5^tXk5J zeVtD!>e*ryMGwA5%@DOOuw+UF)x+(@IMw^KYnF-SnRnb`lVOcjw^;q(4(O!UX#(4~3;cm%pjdxaXry^OYstN5nqoe!3yby zZVeWGgPqIWVOpx7hJ>51`E>DivGWS&d*@TXzU`+5bd%5g5^;(qTzBs8XrR+I4QU!B zRHH~0d6tLn9vZ2~B@~qHd{{U?sGgW&KfPT=Yz;Zgo|B%+u;DySG2wXTm|N3Lte~18 zf$hrUCGbx3#br_=4O8%g#Ku0AW9GHKxW(!NyYm&E^K9VZek^7tw0bo&Es8hKq?0p~ zEFPkI-e=?HYg}Pi!{m(&CH@T)=jC_B3u4xdAuluGryRsG26*X|odT*;7>OF3QHy(e z*lt_!XI_y-CDF&M;!n$`Kb5?}soP&=3c{XtH*^7JEj(bPjP_jABP^%hI9S?lH6`IlQ%98MSu; z>LAcynLz~Qq8RX$r%tl)D5;;h*ma@UeY)RTkLJFi9i!R;fZ|zhh%0$@91H&h^BeS* zNpxwQ{#LuHc*Po%vAuwjD`+}66gMLfPTCrmf_@YNUk%)$!Qu*(JT{@Ngz?v;w3SP3 zE%($$T;mYHv%T0&RgE0-vJlhU1;Ekp8+Cd;LB_7_SK+gq-otkhzrz#pkPK!8mfV_J zHW!REuU=a@qZ^q&c9b`P#J@88Q1U&C$omHg`m7=MA0)Y?`;P6y{;K-(;3Z90Glu4< zMhrnL*hiOt2P=hFxcUW>=A6^A-biX6!*cGPGxW2qk8T!til_=bxJ%*_z#fU*&s5cC zFsk4a568Q>FWD~bZgg0{8ryd~?{Qq^&<+3bu`HkAa^4hb^&OSj#|piH(~A9kZSuNN zYIcx~jUIRhS30uU2u1gxz7si^KWjhmfN+vu2uJLAFarQ$FWGBUzV&*R+U@%hYD;S4N#nu zyS4PSP1NXw?{juQAYf8}bu3uJUa&bqJ|9`ByXTP8f2aC(aGVAT;%ONUV+|7N!c!vh zui#6yGyjJD9HT31w-qs8}Y-LLXIyp3kJFENf*VO9w80}lQQ{m8- z)LboFJ`;P7Iz1K67Yc!JWNB=SETpb2b>*`J}2x_WVibwh4Xb;4B?P8iYeArZ*O( zv`85|V$rU}^*Ty6GwmzK@b~vJQr;UkWH9r`2du8OE(bfHgP-CY8%BPj=K31LUVpoJ z^KhBS!yH!#6j{TDaT_1q8_>BbEpW`nfSdj6#B=%wtM`tMK-g)FZ{zG-$0<9Bl<{{! zDVru@GTv?ViR%fH8x3MLCOjF{@zpy&PRaCyKcdP^EslJ}$HAF2wV_wL#~*HoR&5e! 
z;d#nUCfS}dsw}cD(D_tO;Hv1+3*3lr>QT!cQ6!*DY0c-xES1Nvf4XuOOx1-oG!Q6) zs&3lWtxY4Xea_a8r>{2#fI2X}HSOobxX;|L*6FJSWwY0KJx53?yT&bM@?g z|4T%*aJA7xdI3TR-J!>I;D%szjUpg)w1RitjG<)?F)-1@bG^W)Xs|j~T#Dbhp5POG zq#MfeFD!VchLvzz^BrGop_Xk*hkA{2o%{%3M{tRU%=kw2)P)p6ses*)Ma{~=T*I|{H2+ru z%x&W*mt>AMxh`U8rXSNpMDSOrx#ec}`VHVstf)B~!>pL&W1M7Y^`0fW5$i!x=)uc>$Lh578bI`ktv=uA=E>{%uAp}JzYo%Qc>cW|_3;U{q7kl5W@c{|> zH~RiKZ?aWIW$MHXk#(TpkF(N6FX%v4Mam(GALVLYrJ;tsRU0KuydZ%ok)R)9nBt1; zDtoqL(5D=Rv-!pgB3Q`t6AqpkBX|Ef0}M#ka!67)?k_|iALL|?3`4UShxVhQLv5dK@FH?tE6qvA9ZIDnwd#kGhQ)#Y`&t-`+qRh+5v@tz-L4Sp zkSR3Y3D$o~{bFXmHHgcC9%tt?7q^URJ|z?3)emGpF3^ek1kUV~c)g8Rxlb{#Oee5qq9$nX46F1fhoUCenu*|mF`Zz4b9wc z$D7TQ1O+)B2|srIeoN*scwL7=VikquHPh@+m2%)I>OM%UQj2lVC4&WoqQOb=$v@`= zdY~jFeNuOUxvU{XhD<@$+77&Z^rc<7XW!Uu`%3KHv%XgtgaQI@XKaN?rO4Z`(FAHrb_1T()zCg?c z0644Gt}0HJ3y>2I{*m%lU8}Kb(%1rw&(Za#aA;SJt7)(8)>w0#DVm|w}YDkks@X&TgCEmUrf9G$#$u)#j zHm}6;+7F_G3ViWMun^V{gpo@`wn9|ssif7!#n|4LbRM4OObi%Gjy0+@enXj+gNTnQ zq+d{fLU0)T_2!%!KYZt`yCM*WXw33Y2{wO2bojy+s<>yX?lIl6?S0V#`ne*i4?iC? z=>SO|qZl5)m-J2P0MoAJ!lJByWkgVK#o)o`P|Xw(kxaj?Ufl2Ar5~~=qtZ^}9s=*1 zD5D&q-g6323dFKF_imLnEaF^P`w9{9i|2JFZk|zIzL(r#;13q=fUaS5YBq-zdhnWpp(>SEkPZ0u)T>eP_t{n0<>NiKo(k?YS{HrgT(r;(Dmw9fJeE-HWK=U(!UF* zbqGdzuZ;>QmR2KXSu^q2Q%R_t+DD6h-B9r=pI51d==39I zlho^eC?TXSX_O4@;gxbqw9Mfx`r`ScUy%J~#`0I=5B8WviKC48Y6SD%g2Q#?!LN1) zehc^daXE@j?~C3iWW1O0A1}fum_%WmaiXD|hM4+{F!f(PkP@bWj=p#jq|n&lxarUxE6D%w$C4E~? 
zuDm_t@IK`sTd&0kt!wcvnb3o9=(f(vicPVCet8ehcmicReLo4Q$5~)fv3!STkL`OS zoIf}pnme>xgdH_%{XIyt(~V!1Z}+Z>_W5mkP3iQwcsJ`VO&XLdZu#7&$4J$}WJ;l8 zaqVdnOuIPh47kjkVnpH*0ovnCslcc^cB04_JM#rX>orF3Rh6_ ztU_x8-257|aX1#Wor>!r{214THCy&Nj&kSuY#E%7)gf@X0XmJfczTQ(Kcu1RxtIEH zc}@`%V4KASi~>;L58N`7EC9Q68g7+oRvEaRf&KwdOrAbGg38zk(xrRaM}pwa(x_hg zX#o_>xE?;)N%>c}RcQUH$Dq3v|#5UN~ zlK(}TcguGLsN=eP(aWs6>+T-1j^pvcF{7A4v!L?>CQerb!1lETb%D$?t21azyv&4@ zV|kd*K^xrfoU$96mrkmYbPI}h2F~-(@3-Oi|Sw^I4RIx8y6{1 zH#5^8o;z~!gr|HTA|blGIIMfoT!UzmBm3kJ_HoYlW}7_Qhl*|s-=>8zE(cm$7R5~^ z1mmH#4MxUwsTI`!cil9=dTr&>i35RZXF{Nmz~7_B-}YNGj}Bzpb1WjJIpvV=_T_cC zTIA4}LAB!N(Chk~gUQnnuRg)$r=-3Bq=d+LPfdIJ21@>E@5#NkQ5pQzd4ru`p6C~D|b!oRtmPP ze&*2I5Vpl%txFb^Lm^X1J_A9<#`$WDySAaKYPiJ4g19L#CCd!!i_)?Z1LF~?jkdSZ zv^j?$0BGO5&z{+u>0sm3A$oU`Fj6uVy^>ACK|iVfW#-v=VyxTV{w4~$reaQ1D~28XQJ~b{M&O$k z1`0psT;7!%_+w0&nLL2wjQ=u0cv0|uDwk&Bw->|sB#_R`iOO85%}9_u6&G*Z2bU{K zuzLE27yHn|oYOwtOa9d__BdF)gIt%UG!>J6J1f)D;vA=>>xfGahWiWU!5ZiD!zOSa zUc)tTA=LD234KN9?FY0&-Ot15-(m05!>`J}CDxHB5GPNSLJ5b5BHWZ|&K}Z7rfU%RA!IC$Ueb} z@yj0qV+@Yt_AQSY&Qkh1ZsH5l)4o1`S_*c-~_ z;?ENDU{X(4x+9H|yMQH2UCZ{ER&+d0T-F&QLxKnOEd4BXn}2kt_z)OlRq~>+t(ACY z=?AmUtA(fs1D}YnMRAAb1%if>kgP9Ko5FzYdTy3u^`y*w9KHB=hH_W zmm8LgKr0=sx0ax2B6ynV6no>WLKPX8Oh|5CZh7MD7ZkeWw445g3*j|QHVIt6c8W7o zW+8}!jpLO5KxJW8Sii=^(Z4hyoDrZIt?=1!d{U+H&B+~*XqD|++p1GP9%;k{LMD26 zt$=^eD_M!9#l4Zv=yGF2c3{)#J?w3nlts}VL}p)C7^&j1$h5#=P~7g->6rhSQ;T)R zfTkUtahuiaE%5GUC*{UACv!62vbAG!i66pF4zQ&FO}$>cIo1=pR6qT4D>MT@}Tka>#7Jwl8iD;S!fVB`5V@$mDgq-Nr|=WT)XI_uHbl$+hoF zMX=$DL=Moa1AO8-s}3pNNgb`&g*;-RrnAc&{i{EUNSU^- zg^OysDos#0K=fWfNW0L44-?VqxtkOypYn1fai>T6q+%vketVV?<_S`X{|OfqCjfsw z%b5rpODGxV=gUqYas)5857;T@CZ+fv!u~2CkEV+n1%Z%2fIxyf2?Pl4uEE`dYjAgW zO>htH?(P~ixVyW%`zhYF6N5n>F%oP>Z-lhT69c9p4J8PPCJUC7eHBhD<#o~g{}%UEVp*nY=BhO7N!QRa zsh4@z9??moqEwdf(^gCbc*U8{O@1U=#f!1c@g%=N%^4yg(`)Gyx4H{`%;8kiXc;n2 zxkI{5`ls5QB3Iqh)3b`7y(7eO*yiR4Bfm|fz4w{rNM>A&(6fwkx))5YSW)j0} zq)!DAN}tZ8l?RZu2iBde7BNP>7`B!7yTk`E3g&w$BozdIv$kqbPtnlOEMu8Q(;;+B 
zm1cMZTXpKaaI*5$&l9W4*dRMMYmoG(wb!<;nupXVQBexzkNhKFffV=b^OR`khsh-> zZE&W|8mVW08qC^xL#~heyDm?Z55+LGJ4%9ea;T+#?!1R1EVVKPQmj9zr)+lal$R2b z->$oS4O!Z2;c7DxtV~nV(Y;Xu&_~jvC9}6LDmKT4%FV(d-VkRw*gn)rH;}xjv^-!@ z-twSj?NPX*=+leYaH)C`MQ_p+h6C_I+csZBu6E`RNUI#Id*w;*k#LUMQuETH3E??i z+}iJ)e677T&jw=)D)Kcje{v{N+1d-f7b|Qzv^8tq1-QwX;-V@?Y z1+(cGXgXh%iQj10-)WA8YpF*hJx^Ypv=hB@fmopNm0~)($#gb$B>v8JiK1Y|UpA{S zYce9fDNfAwC|5>l!ahT?!#LeO#;Ib}gq2H4ie@nW0Wa#5cauEx#C*iHdpSZ}p+Ay*-`bVwZ2C6b}4Y$Ys9lFlJ)s#QevYh-x!SYC+s^ zFnf>zVe3lxJ_K=Ox*MyUja0BlgT7bt$hfL+t%s#>Zl>j4o2HasS-q4MzXX&0S7O?Y zyUYmKyhEr8bVuk0nV|!fjY_ZH^SI^;kHj*0j04KPldG8_BM0<8Ru&-@rPIXgGK4#F zlg0Q&hZF(GG%SlBX1(ptGZK<3vO9nJf&Q=6%Vxe=2!bj=hHGzWZZlHJq35^WN1C-y zAdzkVz1?$M7WM(>0M4|W>i5uhH;!>T4wXveFPDYD)zWgmxRr^olEJHLaPym6MB%5R z?p=_!?St5>*p_Mpm!C}5l0wF41!p>-t;(r1t*-fB-rYhfXw5zWPb@Qh`*_hC5bEY8 zde+@TU-EaIQ$RT_5HYK>f>Es@Z@|H=9cvOTEnj&gvsK#NBvEsVv;yk$i-+FHcMEtq z$#DB0AHP1Y>pP%ZvOi59!M{FbOn=P~A_KbaB3v4!il_yl-}HXJBSJ0x{CfI%Lq?GY zzYVIwVUD_UU z*wa-&d*P{YXvaX*I@vxH82IRy zg;Xs;7mj*b7SZqrLOwpyI7~`iA-$p2iWO4kCY_gYvz)Ot$T*uoAvh)TCZi>XvBK!! 
zI_lwkFia-&AFepv7a|UF7z`?TX`4*1?W4iRcz=dk3MXz$r|hyQC-XEAqNRQ2e0lUM zzqx%@(hIZr!%atGHhUQG6ht}M@@8xKXaU!PU-YgO3Hd6YsAa=k&-6-VhnAbGY1;AM z&GgdI1I)7hx8zH{&fiR>@0sg$@|!lywFsc3e*fc1)7)z6KZWXV(B;!Yl;&M8yB*() z_Z^z%joTod4wGweXz@U|$bDG67Fke12OSJJmNJN2s^w9T{(zj>S#b@UM4JY|`5+N% zz^p(};;L4&bsyIfm=!;skFDr(L@ahwfO_}_>1yIQk2aTcwUJ9^=C_t7QM#3sCO`gN zm71mKkw388+6syhIjOYhqF@MH4P@=rw%*^&nh-L%w%__E2=6-Z@k%-m+s&S0%Uk`+ZrnPqed$o|mwh)-gC_W1Z|& z0-rlG{Ne{_7HaYyoF)A)MY#B@VcjN3T$D@k?LMM!a7t4eveR) z_;1V9Bgc1YqnC0WhjwQ6`->FVl=J%1WYQMGxHfU;k~LJwXU=Eqexszak6GF{DN5oz z(M50c&g4=c;v5NwI>>E0AxwE3H0KBqbzAJi1T*~#xejkK%iiCR>g=IEN~Kndp6e6L z`&;~Lq@LjOxACF~hf?Y6?_#28&DAq`4sb6r^{vv*P4;8#X{R`vrxeL+-n|(c(^cF{ zKdj)zyrS61T{0t2+40D|{?KW2&e1kPJ%$l?{yqfu!ikbW>N89#)~dCBx-)%|&iM(< zDMx*|CQtC1wWysYyKaxip3Fr4`H#U`eQ(XbN^irESS1lh_+FsI8R>mrl<8IIQq;}t zoa#|qDev0X<%)8{1>LJ8vfywNjaPcq@KD+QnO`wDKZM`fsmgQbPDl|( zbu#_4&sO1x8+jH;er+9UkC!znqiaB$_gme**O6BEA4?F`mhYUa7;#CT(;4PsnCET3 zJ=@HPzOA_px%CYM{^adnQ@(Tm2oQq{f!yd5qQR&#JMSpl*(0!|ZM7X^C#5De7kNAG4+AXPZ?^ia=c}=>GptH{yqHn{Z1%Ft373J1E1(f1#_c=B@tOph`|B&Pjf&(9s}8HVQqkW)keJT9 z4I=Vc4xfJ+8OjN_{=l|UPe1W%$Uw{!WCf4bt;-iEh#k^`x+|$gZ%7M>L$r^~k_M!F zy9;ybZ)#p2*3L#WpZ{~(yWr@>_n*w0Ir1eDk;EmD@vjilI#h_T6n~~wwOa%)aJbhI z)v#Teti1dQ^tVV}+&wT{_2l3Ac*zOZ)PoDVQVwt}mn=PnW9suL4mv+Sq zG|$<{CE#X34$YTbW6!b5c9eEw=@a`(Ld)Dhc{ z@D2U>yK+-G8|@_6je+9v7t0(>2RrF30qF1|)~jDY5vBeWID_ISZwV~a@!PvwA4O?Q zuZ6qat%gaRP~(r22IBv^KD(rSIbh>nMR2*?Hp}_D?o!nal~j{A^e*_IH8XIyS=BQe zDeip|Y{rRnFy%f&Bt#tpA})n{#|R`oYMuWPt{O$h*`{}Uy3_MhR?@iSA3p*n1R8d30eGloR;$^WRV2 z2zPxc#aRg>&ZHuojxhn_c%9zf#u_y$y&JO=K7X?}OKKy-w$&4P`dI4XqBVNWjot>Q zJXLU8SA=86k#F?V*@kHkwPCoCxRul36Y;ufCCDOdewvJ~mlv0fD zUjNA)b*TXZNy)*pf-#t0jAl-zW2EDsrWAE9va|7u?e=|Z@7 zPuLA0e^XT3m3>SnoDXApUaP}bf~lWZJ2*-9TMQ zCo#zbkSVIl1uq1ptp0bBqk`IgSbzWj8_HFb!s`3Y)%?@POF{WjjcB|>*a`nwEFc=_ z2R&_^e&1o?p>&l8SdUXxcjZwg)8zd`%&mXLcGgJookBZ2Gkl2OjPWbA@*~|X#EcQ( zEoWb%vqjUau3w%Wunr}%>RBHG5UzZ44CES>s}eh7?^WslFJ^I6g+EI-oA^btP{F`T zi`5kFCx9i-5+33qFkPp)t7R?A)9|Jd(d`UABe#7WKZX!(f!Y}$oBt~S5IMBwJv)H| 
zeMx5U{9qS6TE!1k51n1nX1)O5T*w0vdvI%fA+=ng{nFw2>JzX^Cto@D)oxxRb1zr^9Il_KPFhY{wUW^HN6hK zO0epet~UYW{Y%!8w)NiTPxfpsk8{P0>ZBg#hH&0EZf}JNVV1W3Q)9x>m$+=)}e(k|ny*PUU%2Z==gGl(4l&(DnR7FC@glA8d{ z4Iy6K)O0)D?KcT?gO!$~IV~n89j3%ovidoqsKUP0-RkjiUu((ce;A>=3l34l&gpomxB;3)Gu{QJMvmr*L`t z;b}k`MS0>*P~n+d=iDH8D~DnG$HZil<>0)#?w_$W6<}l#r&5`5@bG6}4xC+lyRRk# zJy@Y_nt02+vIrdK5YREGFK)f6fXn1=vb4{l2GbJh{@2&5 zug;j{RkHolCqNn_7$WUFGP`mOf6THz@wxtD{|jqC22)+z$IW|eR*94?iu(B!06)q> zkP8Peb=m?VAtbS{wPLA=&f?N$tu6s;Oy@)zwFDG_)_-|7aGV#E)~enLp-u7-ci#Zo z3CU_H)j0=>?BL`K*C+1dN~%$c)1UG;XvPTWGr}p{I4-&g*9jEzIxoqnDuF^K<*Ygv zDZz?`pZaCW9u2_qpn4>eYSK&9u$2%qpJqLaYOpI9a9Y7lIKu@dM zAqyr7d`B$vK@d1+ku(M|2S)vO7x2xGUnVn`zWXOz$HH3uJxNi`e z-bhdP5exl-;ZDLmMh?|NQOJy3&w3YJB|vo1rWg`V=!d?K=JrD7BgC{sv%hfKk#B5j ze9kds_~F+8dphOilV@POtg|sqe~YV~=Lsn{q*Tj1X+?n&QQcd%P!R^#O9?^r%CP29 zZJlVxx|ZzD4I*)kkUyx?K5A>t&0zeqr)4| zLJ-3>@yBxvZJYp9MbKi=e*|#`U#SJvCx=EV5$vxrUV$8|DjQtWRjniGbNzgJO->^BxGJa80HlY1aR=GPU}>b2X9Tk2LJ=y8zHhK2o3>X5q*Z{Yw; z+{bLj(?mXR5E5Ob?e%Bj7@P&0qQU@@n3d%3O+x_V8R=E`2Il^6l`gJEW`E}JQJ?M< zBh$5ObTQAkVR^k3>@1KxDpois;V`}dBWzC)z`kS`XAzB0*Sw24LJ&>OUWAVlR(NA zpvkz2(B9%XHs%HD@NbqsADwKj2!vUHXiOwypwFP7OQjw$bu=srxU zE11|opC!vXWv$F8goWb136ju5nn|hA;GMPp&~7-3v}_hQUnif5e*`_-j>-!9__@+@PCbd$lXpERIy8Jg@OX4?uVy$43d^9#v>OSkKQU57!JlHh-Kz(wnuUP#ExsJ)=$`t& z+Rx=_@MXP!+y~x`9oV>SAHGRFWhj(_{V@;UlqEb0E!m8iF*F?24_fjHF#&-X(zN%{kDNM+|d}(XSTViwo zTh@%FTL zsKLW7|Ikg`^mn~WxNmOC3TpPa+|#(09&w{34-M6dli|>JOq`L>64(q^ za@HrI0x98Wim!c5Q5d#t(KIQ!azpU4)5dWFQ*%?@O1`xEf$}N2ew{>O`=Xvv8-41j zcpwYH>B4nIUbbB}n@rZAfbty?@7>R>(#`Hgyw#X^qF6E8$|^xZS)hx81w-LqJ&4fX zvT{p)5&b0Ung1=MucQDdn?U=P|D)sVTwL@Kxo`r?kY7#|AvQ|Pz5__+^l#68M+GR% zbTy)X0n07)Jkq~Ceh&#TMu^?_^A%&#FVvCs93c8RV2r7jF9jbBA_P1Am1z_MKtUJC zVgHmNCz-&GKvsq&*HcDEHRz@%pMV-^_ea1rRN)3_`09IvB21wW(w}(?jiFfu+noeN z(XZ;()wAy=_rM~y&wl@{R(NZZ7RM(Fd*0oTvY2ZvzYa}4h5CK{6LCZK9HPot#D1ca zgQKp~gmFC2P#Mt9V`d3s#o*5`bT#0`FzZQo)4vj0Z(`@Xf=@xrQ==NW|HiO($cZKo$S#I8}=UKQ&&To4`!q1R)E>>2f 
zlPd(TqIPHbk_98SF$_gT1c#~|C4tD|Wlk3C$FUs9?h(&4WjVYPYdz+q&>%Y>dREri zi3xr|_6iv!_X2|3H)zH|UXTSs{MTTu0flwRgl1VBxQvfOIZ!03g>0^o14Lb9NK(3@ z_UosOgS+`kDg?SV&SWB`Si6>WfXrUN4<*WE$=-5dHUpO9xy8OiSU_3D$%g(}qYB!y zCNqzn-;_k3BZ6A6Etz-X`Z5Z*gu$8?_}xqbC7#Yg+P)i4aZ z8ikU7o5KufTOYO3QS6QZO^GIjj%G*^kS?JXle4hF?kXNisG#XDN9vPj%#GD7x9@E= zg|G4L?hVIEG5vA&^9Z6eQ-F|@!n1XSY3>!;WC&o7Bb+564?BnIZS>3xc@zNe!9U9!& zeaV+WU46h2V^7&bowxfZK@CV)M;oJix0?X0o$auZRlaq33j_viv-47(@f#37aBGdY zJ=8AY8LyKmgNVkXyakPqmS*t&OJWJsC$oXe-N&L&7@4WwVjdxby*;J+sB-tva+1Qm zGa*M`c%+#H58Mc;WWcT{gV*~Gei1fHDM2TiWpmAcE4>TMbs`ldW|~jGkzF9P%$O~I zrlQ#|oAbJfNF#l3+=Kc}SpCu?zzPOrSn?6YkB#g1MzE&4&N;+)w|kOb|p_@$9uKB zrBQ>vidR(=6@&vjh`yyTE2|S@lcDPdbPI*4STq@FB$jj?8K zHSgU6K}v!9L38)!X^c^}&@V$d4rMTy{ut@yV3WlVi2#+^N-cuW*35SJKsT)~avKoa z)7_5kG4D10${pW~v0p^p3Y5V&rvUk7XT@whbDi?J4-ZRtEz2Ikhly}2x^1Jx zWP?JmeWe8uJhq-g@5+bG%bvQ;jV8bgRfiS++b-$6qjIwPk&RpOK3|jwGobOolzA1! zZznArb#0fCS?#a5MBzDKONX|#`=oKFWVsaeHeHm_g5UvE3)I5`%O7JfHaR!~M5cPL z?w%R1bh4iE$A)vdNq^Z`^YCip5?F=pmLe!zlw6=cY5*Y71mZDRTsPxb40l`-4loDh zV;1GC8Qk72T-1|*l~I-o8^Y0{i2x$yG3$u8 zd()#laxVpc=^EX><#fv0((l-mX|J0jI@yYn&b^vecjFf#LnK;uzErny^sg=0Pe5+< zflfCPfBBqFz`G#LL*#7ffa|D*^_zsM=BDvUck2Of8%xiWQ)!z4fBH%iG_@mB3QQaa zLUK`>$8Sn1U{`*N`Qm$liBZ2o`&QH`g(PJmV}5k->wDc1J}ygZcLMu)H_7OplO)Qq zfB!19+~ACLByYgM*0f?yof#Fv-5G1$8IZh7y->B#kJiWpefdAxdKF66;;T! 
zzJfioo&Vm>E$Dh9Ok!-)3h$wCvmHLz*+oxIKJ#%;RdD=c}JhM&CGAe`{V_cpt7w=$wAWp zvM@pK^m^3Oa_eBa&{N}xN$sq@{>k^LGHKhlBJ6L*My*oaKpT@5ee^*k<7_`Eyr>1mtlA6MK!^Vqt5qjxvickG34P5tgN72g$A zXR8*;$mUkzT;#@u1o_qpMi9Iww3QZo?q5+A?GVP&t~7D^#$mrN=+z}XqOX<=Rdcok zgBC2>udH$APi0S5BUsrUBM9Qpo#d;WeQl2L)|*3=vDSe?6*s5SyjKmDSb`weI_+bE z?5LU0#n6~~*l&Hd{h z8S>sz8YaiNYwKT&3W8k-gSK(&ckU!A4~Bpb=!Ss-J16Kpiq05O^-wl5+QL9-=YYd2 zzH{lW4OaYXSMCEr)aBI7_MdFsXH|O^sp-#K*mp`dWyiQ<|3w<5?SG;+mDrVOa<_Bz zy7@UGVqnydXjz+>PX<{?Ki`E7ZM+#S?_sQGOd9+`I`Sb790rhorx3>?qE8!$ z>m-f`y5uPl9CqukTdV7^w(#qevjXaWGq(8Nq1-AitQg`1uekP0TswzyqRq-nOPUoI zPcc%m4mCsh&1e5M1|gN-V5!KW`h0L$+1bOdkJ#)x51;KKfzh$3hx@Ymc=@gOqVv8T zYn%QJaL!+6q~mpN6K*Q>tTh9gd_Bdv?qRErW|ea_(v_O+dep1RaBPgq(|AY)YyXR3 zDpr*}6rg;sr2o#T1hxtUm?I=!r+#VNjCipWXf%WHiMPRSrp8aZnrreDx-|pK;P;zl}u>a~MqQC9eKMYs2%t@7>qT+Ut#DlStJvl7mvZVu9Ab8u_xf8h`5zJGQc&AM*$AQ>;5lIDR< z>bG|k8wduT)2w-lK=5_YEu}cgKQyHg`UQAC%)Cd(K?3r$t8T)d?s~m!&kho7-G0UV zai}U`y3(I!l?o)ZhF^snLZ!}&>(^hUn0pj6zFpjbyL#W6yn$NYGVtB85%mPP7F<|1 zJzRg`cN1kgNHkm-z)5W~`lX|xpdKwmNbqbk9y_^GPE(>Tak%XaIxmsVf`A*D41+79 z0oCXcf&+s(V)@s1c~KVf**IpIu?QFGI`Bnhdl6^>bMJI^%?b3XQ7Z+U0HhQSm zKZPs=9}?lLE-Xsf-Hu@@u_I$RV*1ZS#vscmFxiN@Q7{1-nqe82?Ej?ItYP7|40I&N zft3T_P=jb+mm&O@p!5Hx3j6;L^_oSElIfw*y6~S&Am9J!1`IOmGS0Lj;Y~!*y*%_g zPK)>yxBn$>7}v_p7Dd5ADvQDIJSS@et1AL%T2TsgbjBnWk%8dc!^r|surAsId~U=I z4eb+Si9pBMWZ*NHeGPd6L>~ryqylNq74J&!U`Mg~0)5wOaeNFGawU+@;#n?J;Y#;@ zlcBaMHm3{VG_Jq(N47_vJsBF__if1?pt!KrDu=}_bg~6%i(V=e%13eW^p#j^C}+p< z^)it7z^GHg1%MOmE@fe;oTmZ{6Jk?(PZ2yYW?aY*Oh5pWncF}$nA zB8_k)o%u`okaOZW&{y%6rGVix=6KLvA_l(yNQgao?7{I@r64^@oj?=|jy5v{O6IK; zLqSz0x7x>BVxx7dpTA=yG9RZYg&DsLNMue0TkZK1s!I)Ur2{c7hav&?QuHYQRL z_^4e09PE?RSA`GQPc+n_VTH_U>Ht{23x;P7e+`*R7YM<1NG!YSCMc3(v~9GR`L-(& zr9yz7y0SR~nJz%~y*hBd2gSx2J%WLe-fFcrZngzH-^cMbnKD{7&f77TgcJb&FScM< z#{b$-ePvg^JqBU%a2CwIl|kV7kweXHLRA#96JnmmNuvScetx6!A=o8AI{VjfsOHoe z^Z2l!;K9dYp3sluG2tnDm4t%Zvo-yET9yz3am}tkY!)3>V{*tPM9HDX<`NTR#{>>< zwD~muVxZ?a@xv1nPO~pR*$F57ub6}57!3`?2Y}#ySDPH2^w_cgM~G-}qfqs{ccX+` 
zeEF>=@AR{_{e^STCeCE%n9$Nk6|x&5PT8NFd@F!Cee`8s(mt!{-elWS#lrkcx#cze z$LNu7T#S)^v7cs?q8!q^ob}>|T#7ovPY7b)Ky)X^7Iib5`ZE&dCNhMcrN8qIV?Z@; zC!xO#b#MS5-P{g$Tkve+q-oc-4znv4g7@o%;R!FtqfAkk=p68ksO!)3$FmGS+FSPi zYQj^2Aol#kU1HwaY;~T~!aX!SjJH&M^VxoHcW1IFvSp%gD)oYFRtj-8 zL3QoqO9yf&0wIJDess*PcEg{o+v1~_v=&?Mpnb86C+b4DZ*W1sykIjwXA1nb%vx4F zG|~*i3r&nSbRpVANRSf`)5io_roFkL-NJSN?lva$jt{ld5}Df zfrl$F9OZ$dY&+WiGlY2gZs3tdrC@n_3EHV5Fp+;>R@Kiv^*yOmC(j28BCF z?&Vu<4>aam31`JNRi-F4HqTpi4wOjaFa1zl+L_J$lMZWpq5?9~Hj4FD z*iiJNR#`|z5tRc0QgO=|%U4E zJge^%-Hg)SbiY$nt&i-Pd&%h#5=%z60BZ2`b}BjqwleoxZE)Shmg!=t+Tu^k{i=Dz zsU~&L`GN`As$_ON`ote@hl`l0egB%b=god=FY{1-Xy$7*+GwVfPIF-1DJCYUftIz? z7!&l{qcjjrMc#VXO0;0Q>-#tkF=lKbZViXcQ4c?fB2N@L_V@cwBh2?!c2*x*ww-23 z`q~HMBf?9S`*emGq~Te_=pkDfKx==PzP5JS{CzF}7teA!2gk|Ga(J>_=qw9q$*4W@ znchsim(*ouGgKB|Vz+*CjZPEhnI-jblo+b&7n9-+GHROq*2TP8-DOP}d0Hl9w11BT zd67M5u}5Bn?!K)}o8Rg=AIr2c*jyD|oDZrv7NNc3!0S(WpSy1Guz=y}#!-gK3@Y7Sud| zcSk}^nN^T{8#)}6^-eLMd+rb6FE%_W!#F2-FX>rohJDiQQF`+mKDF2h8T82d(FOA4 z_n8KCu66bnR5pow6~QSu&fC?zs)&!hsaoqs)%%$pOUF~BJ@&Cbf``qe42^S!f1>ku zUIw&d2A42Hu9th#W$n@xKy$$+!k2f(V zs;yU)cE7PDYkfci*`1kb+{tOo$m6=HwJ@2;j}?*zq260?&A@H=dgVn%Yi7B|;-_Wk zijnr5==qC5NI_}K!IJpUPYD8S&4r*H6ME4?M%gutHn^hPh|YWzZQZ^J&x{<0PI z!jq)pv>L3|sWFnMJ91#ob25r&g$|~hzVGo!>Bdc6z!@-iwheX)ifrds>9di1$yqEQbkl{;?`^zqxVPuWu+Y}gY&CEe^+@KD)<W=3(}z=2V9;hJE>`0$Q+Ncu(mc0hW3x%R2oXFaPSV&Ou3|70H5o;A)`eXe&|i?>F>QC7^L_Mj0|A?Se>CYfPRJGTe$t^Ot}!4pYeL z6-WpOulBd>A0Qwgps6eOF8^kl!UUK0#o~Y;AT|%vp0`L>Zn@i7UjKE2wAKTEyaxZV zo?+NwgTn|cEYfE_Tr6<-v7R^Y|D!O-C7u+S(DzY~>GA~jT=#X5EdDy;Nzb=P_a{Mi zOdpDK(5f)+MGn+PzBxH)IF`K4n0^$V3wo3IHQgTj*1xsNLt&{6CPo1UO`Wdw4r8YY zKObfi8v^nzNL_=DQ8?h?(tkX)%@K!u>^$8gdt>s!>pMS+XCFPgICDx50x2Y)mx%pn zo#02~sf?FeSi%opl>b<7FxLMu!iw5QWc{c>uCD%`{o_|@&IiH|ybw?+(DqV2>w+g@|+GD zsZoHv&0s_5Xdb#)Gc}<(5ikV|fFzD0l4ZuEw@;11unLYnD~q0BZ5}i@L1k%ywaf?k z@moI6vg`Q;<=57cKPNL}it%5F9q7cHR~lrV4Ha#>dVNI-i*%Wh%;)acm)x;ao8}@S z2`!90?zO8RGR|99@c{-?BZzo(b079R@wq-Og*YyO&`s-@O&cYsL(vy#NG 
z?J-+V_erDkeidRB{px^Gw|Js|m}{yk`7UUphsq;CCHQgJaGinDjE^7zBW`Gq^2m7F z>5qm>i6OieCcep|RvPh`y8tHl z;?n%ihHD@3p(I=5DPTEtBL1Y`8XYll!)i)yC)WU@@~&H0&35^e1x2naEc)(IS(Icl z3WdoA^HMI2S4QAS88gbA0 z19H!%6T~o}Z@rmzLV*FQPO72^}bbWCOUA}UuteuqW#m*Spi3ivm9cn z>q9raWHg~9D|X5EPX?-pDv{=@Z9ymN0jTD6Ezj1wosRpI+o12$vsvc_kHspJz>5Wt zfvI?oFSqtW^Pfl{d$RbuQf~8N<0tYlgQ$92K_XV0nd#y4{o#uoPHz&)O~{EUJ*LHc zC>xDa{V@){xG~ukiK#K?yF?E+pDKN3nqb6&HdBTvpQEdAj^Hwz9fl*~Dh^S?6{!haV-h!YdkF{ z!_zdLxr(QEHVn9*Ukoz@HK^oP)E1^vC6uwgI5qv{KuNP^rtjyrc_tjv`Inep70)qH zBpSj`&>C&Z@YX-D)GV;S@xF!aa{bS3OJ7}IMrT95-;M>M=bb1W3%^hhH(gji|Fg55 zvdv|nb(jzZl`!7$)(jP=W0mn!;qZFVy8a<~(RSSM)X7lAPTb^5&D_)p72mH5lbm=n_OGCd_L(pbZL-x{Sa@v<=H1c+}`;Z9?sCCx-xsx%=Zu$RA}~c zmO1>BSx<6Rt+Bswt1=Wp1tsL7l3i;1f`zr^hJS{7#~3$MQ8N|zGZ8emL7vAg(k#Rs zA-v%$WY1E~4$#?@+t)O0IaBvd%(g#{Z#x>s;<@-GR?hc-*`%-^o)FkEsyZ(enXkyY z{w1cLW@{efT{n-X>+2ol|3j0l8qA(dvFrr#?KjDxaZ5ul2Gtnfdp*72Nlzj*tPX}o z2F|6XZq-djAO4<8i#@Yr4T}T(H@afSJj>>jB{sFBQ16cGKm}YXQW`((qp0xm;5W%3 z-UmK4UAPY`E0#zfUTv`_s@~cdp8YWFR6M}W8 z$kW^JqSj!tx(_?GPDVr`R}v<%G;~d{VV5-KpL$f8FE}bxH^87Q!+JFv@#S@g8NQD~ z+{h3jY;-u#W{`04^6#oT;i~4G3?W^z!Fyo;&hR+&F9b|s?-E~S zt69J>eZUL5H?XcLDptLgX9OT(->MW$;8Ejr*2Ga`X%Y62U*D^S_T01QwegD;l6n~M z=X*q(SMKtuYB8t0liUTQIcTTz{RA;KE-SGf*UG-(P819nkm6^ZLrF5GOtB4onekl5YMTWY?f=Ov<@o7Y4 zE%Z%%_gjd$$f4BbSNyEfC$whEH4z=zrTLN>9@Z##{Rw{L@QhsRrK5p7oTzt>apMHSv__ zB`$$R7w}-5OJ}(<1s>IKeLovhfN!0|)-AqzdSNcnLbGR83dzS{U>xpGk7t@08{u*% zT>5pB9Y-`@v|;CsNj&pI@ko^x!tEEybCkZ_pW!vDf3ja+>J??z#4G*wlQU=rey^$~ zODoaqguqxr>aVMn(Dm>UxQ_M4w4;<*a&72}0f*<>oiVNVy(j46AmdGEAV7p!Ql|4K zuaG7mSF$&<0O~|`R~2Djkz+Y)l&;RGozOu@WYld1T83%VIOltM!aQbbxCP|gSdRjO zHZ;_KSm*ynGlI-1%TBw40M~8yzm^DLvwsP5mqV}_*Z)6^^ilX7`1N0Sh^^=MR{#W& z;QWn3rL_2?@HUz)ds1U`jv4S-UJ16-=h8Pgl_UbdATn_JdJ*dhI!vyq1Ef%m-C#-~ znTZH-ig&dz5uIlEuB!J$+P9quSj{g)f{WvTJKx?aZizwP_Z}TmV@4vh)24f*yxWH^ zb7CSKz@UME@PPb3tu~rZO}#~cKe($dzvPGoUan>CiQ2m3Xgw~+nS^Wf!Fuoyvcp<8 zw=i1U;CQM-DRXMKU-}zeZ7n|pl}IWN@yI0iKP+{-gdMjqaAXO$1p=Z_XNQaHEP9f# 
zo_Bq~CWche`$VlTdIVYf5nb!w+Pg0*vfRxIE^z(lli*KqBfowep)OFr*Cu{hB}sB>8@CqZHOl|klm9mSZKj#)j+4Ds3+(bP{iv&ESfvKgAU$w^w8Kl{8= z_92S}$C%}@fa3R;<_Mi3q8j>B_o6M<@xadEkzQzw;HNgR0n4+SoF}cGMX-c6kfa74 z<+dhhu?EnZX^)(Far*_$g^l7GtY{I%4=N~NT7r8Ym7lWpfS$q9eLLrD8!B!uBd2a9 zfXS}Xe)|6coNYHvH-pYMscZnVDF4LZ5Nv!L;VNPhPOi0_q~a>tN!|aX+M|4BIQ+7P z#Z~mjcsQoIZu|GdBzlE}Eq4Fo?12{Rk-e>prB6@Q$2~}t>l-9??2^lAlGqvyb^76f z{hRiPE%>9iakoWti5CG`V`3x33^SJY$ZIb2CG4Dm13 zpiWIe?~1Sy&O%F5u;~UsQAREqfQY+4(>E__z{_GHSQkDSMoO^FG zt@~5m(3xo}-N~W_wX>=2ziDB)8A|R@5&oan&x{!zIR>jWD>#Dp!S{#e zupqaqewyW)V9Fq8(bV$a+321vMaavGv-h-c)bEK`3(+(phl)yu* z1T_ItOg8F;@q&97D-8u82|7L5!{cId7M0h@+gAs@PGZYSaF2Y|TCR1sqg`imOI%?Q zqJ4UD@=lOf`n#mW3-Hu|FqscnMmVAOEq#tu*SNNs25c`go*M=j@5_yw8R@=rTU8eR z{kZBzLwX&lrj%AlVbM~2-^FT?Ja-`u;0~BA_nTt?QcQj+kq#kxAs=ZjDrW!Ip)mq` z8f3cSw~z{}bYEg&XiH8v40>a1vg3dbv=g8`Ln-*|EeIpD(0#i(VZbZorf)1n=h{iQ zJDu|Ak#K?bFyc)`+>)WQ{*iLO&w|X_V(pxuN!f)J6WaVC9EfZ9jr#ND^AVBbwJ*wT zFb>IsEPB~i1+XJa#Z~cYnokj=S;c%DJU4)t1bg!gnbRHi{9>ePm|8|A*HNQrS|L04Ti3 z8e}u=wAImkBl!g^f7TvjniDp(-{bSeEqThDNP4&}H9kCV$eZ2Vajmlzb{zVYC&>vI zg~m+VZs;4s!32YK2CmKu_L9Q~dv}}G(@go3JM9uFifxM6PxRT}G%QGlWePdznB z%YA6Jp`z}M(|fPm<22A!rwVqB;J(fbC-YVV`i}X_c9HCE%d}e=>*V}uWZn9s%Wk{X zy73DE*5<`b2~Y zZDx6ya(f3~4E5X7q+v98+I$BEfju9H7b`#iSy^lCZ|^Tz>EFXryut4Lguaf+(nz73{B%8Mgf~UKi*ZSlvl40g)wKx4yfv7gCwnpA|7X4%TDol z4#e|WS7avHNs3$}YP;oWeL>tIm!T3KXPzBdz?-Wf+jaP+phDYo>Is|TJ}0{?#vF0R zZvv425;#00U6M1qvB+CX&0H?4_*(BbhzirF?gEJfdk1h7bGt24(lYH-0zOi4St#gN z7hkWe!-K8d=GD~s2jG#3UaFP5SGES1ZEJg`zYk!I<%Y-UlWP#pje?Dw9ug{(n+Fam zdmeB99TE^Ew9wL>gJP@2?S)2WVkPC5Ich%BMR^SPbo~R@f#$_h$2}gB7kXS)b;KH1 zT#C==hptg=9U1oLHam)Gw7DqBUd7x{li+FSnQncOz&Zj8%5|px+QM5lliF^F(d1X1J;;!m zcskapPk0*3JTN5-Y0bF*vZ*fnhKl;vD0s8lx^ zyF?Wxb+S3fF=R?wu5hdD`5>^Vx-51z9se2oL^1Kx)C_OmYYn;E8eOFI{aD#QU zZjy(7BL*{I^)U%F2}nnFH4z>Y)>DXHeh>>o9;l@@4RK=S5?TKfVk9qFk?j zGEmFS=b_T(oI3h;EGL?^@3kMxFzLH6hy`V9f+!pKI874LlpGEGiAKe$fN|(m-HgkQQI@+?1{%A>m?@6a16k4KDDOmiu zP~h20blLLJI$gYx+H_D!|LUzhv&Y|VDd<3XjYYwF_b=k0k$)H*n~RkQ)5p@KXvBiZ 
z^Q-hv*eJM@L8siFMb6pT{xwKv(W`xgX_#+D!k~xcPOVZeOPhAWJkrENB;VU3YS*O`7)Wu6B$Jzwy1`PV!?#K zpkGl(q3;gdzFDVJ4HRr$I8IAfH>nS^7``7BDfO6X+)VLFc#@Nn$T3VGd4pAflO{fk zQeoo)0U7#`hlu0%pPOgr?>s~U!5cOm-vHB7X~G2KAVxE_nHkJRQf|bMij+GvFhrdB z_rVielMoT7W6#;FBTxOl#sAanOx06H^5c zi@+p5U&eW|*Pj668%&f(rtd6WI6I|*eW@v2VKl)OEowr-zn5cmeNc=iF0+-R&(zIF zO1Zk3ziVZ|EsauDOok zA3!tS8)z3+uOWT|watrV+~-(A#HlRod_&yu2p$r2hv~GNS{dq&ifhPA;e9>lE_xwyVw4 z@IoF@UX*h>qgP-B`UDPCQX@I)e)`EVDP?Dtr)-&o_gAOWgPIh{ww>IHWXmq`O4_1z zpi(?$u%id5#`gaF@0Y0b=3UFPFP$)MWGJ;srOg!?}SwuK1} zZAk(3?hp+x{e$|_`yw&!cm!pH+qi^y=%lsuHvcUjJ4LPhiFoPf0UyT>j2AJE=+x4c zsp{Ez8o*m|#~j*f7A-52DT zW(tR7s9YLtnyJzdpn$Co9b+eFm+-NbJ%Y>F+nJbF??DL_1MN%>fA-1v_jXh2Maqo^ zx;W0(WJQ3<$cdb%_CtjU_d}C~7(!-}jKd2*q%?fR<`0B0{(yJ|d}{G~fD^f`oQ*@#{|nVS5}{&^JB-J|uvM;VBN<$ae= zgLV}oq1#(kPcYXaFB&H@@;@^Fz+6U*BA(_IgwXw~D?5ZoF*?^6XY2bF76gi=`-{zV zM&#LVmnQ+fODGf&T0OK90A{XhWc<|dD4 zm5U5k6i4<3#ZI|h_~Bu$_)WfhEQd7O6WS@>6XheWY!@cy^-;@#e{)@hJehSLrLa)V zwGC>vHc%X6+RWkr2HX}5Ha+O`BpTwAqIR0I1qYY5E#+O^=XS>V=h$Xz8E+}CcNM3> zD2`@7Q50x~O&FW-#D!vSJA+8jB67=Yn3A_`;KIgzWwl}ey=|I2)Q7_|up zUWuJv2KNuDG62e|tv!vloJ#oTw+`8UV{}EUyP$e##zU5~j^j4Y1O{^*Tl)9LAU0;J z-;!jHlw{zOXR-6}9MADrdcxKEv;6srP1+C$8AHFJbvq>QK}E7!!9%mB`L(^=c(}}i z{^Qv_R~!29Q`SpxhNRG-;@5IddEJg$6H%5;lu9chvhLcP!qFEZLZZHGwFVsinN`JT!<1Pxq*$lxi#v;-3I{nOLMY1I}+K$cCU8BI{6r<`W>b}on zHJp%j*dBxA52p;Rs(94Y)goYmF&AIcJxVZ`gPDtf@Uj$I_9yzuVA5Y zj>B|Y)XA5dXf06-6m~jG!N?(oQrmL&t+x2&y3JtiYSgtS>Q-9HTiv+WTL$;E|GK?m z>{S#az18w2-;w=&m_zFm4q}&guTtJ zs%qL5cHAF@anrG$>`*By-$^oN5oPTK&+gXb- z@h0T@9`ALI!A+<4{pJGi56bE{45Cp;XPT9A?mkoQ2HJ6aElETGwD6)glA>f{O<$0d z*~ZoAs;a~78hO#bc|RERO)%H}t$z>cFvBiH22hv59r`v3FV4-38N5Y1-^ARJQhoeF zK0L4Z=gJ`?)u8!jhU{0>FgYEAi{0WHQT)l{qg+wxJt`#z0Tg}ts#!stzXwXEp{H3Z z4F_VlzjpF7nfepdmw5V8+{P5{dX?#Y|51)H^%=B35lS>V#j{>P6MPVHyv~nk?muuf zT0?1~hao)r7;!&P30ByUQRepyUW0iy1a@9NeDHgMO%vjGl^jpz-xQHOFrRd$@oW2I zgc$yAj-%s-UJU~T5_lI|+4xlFFdx;ge#2UkXLj8D8%DfYnml$4o(^)IQ`Td-ICrXh z;7uTka;corPn8UE3zjENhA`TNWsyK$`|CN7EtS{!gYvpQ!J%g$(@RaYR31t!IIwoB7>= 
z+ju3YjEwhEk)XX+cG}F~ucj5Dx~%?Rcqfbok3igR86S0q$&2UyvV|;Wa;By8KcqlH z6rv+*-0*P!DEAt;@Fs^A(sJP-5N3b9{EL4OM=$y-J}8ZV#mr!$4y9zd`j<<0wa>Rc zInN#OO3)E|^f6e_{NbeFQaht` zfn5dp@=FNlH{uvDT+n6r0~HsE6)vEHqTPm^iq7sn!%0g(XQZ}vhzR)@6zcxv-q|cc zK3n609)!YyhJ(oE{ea8Gl&xtfg2m_z6Qd6nCdyg`=d}tc!|tUV%1jn}FGPkPPMoM! zM)VX5jofd~kVn0ZXL!Y^y2};qvM&64;3ru$#kH9hc_yni*WvAvWF{*MH>a zD9_E6i2capl-^=YxGl=Zv-)VdesVK>qxef1!m@dm622jLhcju0EF6+cOP4l41w^Gh z^sNtBbtr8^Y&kol(M3La!Y{f#no<$2P0WZ$B87NR>at(tiqKT!!ZoR=vpSi_ zkBGFd*WXoJF7=BhZ~nkJVhtP;q## zCNbC7Bu1(8tm@Wkq(-SDJNn#vhp$t)or!HK&HTkP{>um>X3&87*YF5S)VzTtZsnvz zq;>;s`(Q;oc4R(ws68;S3y#9OjO;jc7xEPiBh822EHziCWpd*(x-maj^NdH6M%Rj1 znXNE2-3Uk>zzW?`IVn-GE>70%Af>0*c;v%Y3%p-EllORK>d;YNtDRD1q+xsq_IG5U zJr(@)1*ZDM=!D;X`lrWPSeFIMO0r3=TA>RTg%8Z)B7vw#0dxY_BOP|`(Q`04SJax7brbJKVSwc7+T(@1t0TWWq6WOp0cpyD&ujMQY0 zziw-RYPmtRABR_YxV zgjqQAUon3VwpuMrq_Z=R>aRt)7`AjD8PZn@^#C$~X?>O$-8<21XEK%+jdk{RQ5()6 zlmK8waz13=#`!12)1G`tEsG_Y5{5^A9C-iRAI=ZSVmbIFnz}eLIy#Wiz1`8?7uS|c zK$ZBO9+4_P^PeW+E@bGlv+>g-^X=r%w?Um+^fMSaRk%NBw`>lu6aQmrwARaThd!fQ z;i+bt78i~x;LNTKW-h)N#y-g)j82GRwpFW40D$qWej_N_Q|jV2Di|Un$i7#tCc^sx zAMbrf;3PV^BfKJErdb{k0%;hV2PJo*z&A|n1%2^NG`)cX7%tw-GiP32a1M`0-DWoZ z;;NA}V|b(~R3ts4dKW*z=hJDsh`n*ev`TrlfmboPSR701^T!`BAb$igJri@aUHb|X z^)M;q0x#V%3?|XIPbA}?_E{v948LCGZWn&=TAZ813kb8)~hp`!H6^xH0?vKAqYfi ziP3IHjgqZz8FoRJ2Ez1`3r7;>2(2gdC58v}{D@^GKJ<|$7{Ns(XON#^QW6v<3x@cD zLmFp9Pb+4Da7okIRY1`Clu`B&KNKwM*#(4=II)MNkZAoI zeRkp`xfBrUYr*9!L`qi`CsKOWP_QEb|8EY2%7gOi6vU4N63qdBH6v`OY}@&DU;{6z zQd9tIB#J?jM032f{&IX7>0Chy?AfHkh0CLW7Fq%;_#ypO^Vx~f33Rp&5)US=sJ#|n)`Z8nz#^s3+U&9S#eC4QjUj}mPto4B;}M=uJH>L%O7mBkFITuBy#B1p_JX>| zH903*3Gc)FzqWF{=O>o2Qop~=7TJMft>*LLEkl|7j=fa8!Sf!bvGb;LqbQksp2s&G zYE5x+r(ihi$>-c;(Vfw{_iL~2 zvT&*ZckFGUVPoN8V{Mbd3D*sipP1_R&ao9vv(sgtOS4z7kU zH9zR0$Gr7b;;?T2wBOEWRTDNH;QJ>C*WA#&b?i0uitywwbUNS1SDOi421Cio{1(ki zHXht(T(;fEf^ksjRko6Aw1#UU}>gH?W!l*UMbi)T6rrhHG9w9eVYt` z{uXZGMa#Ba>ef;@fjD+h@3`_g0J4lfDwf-Wbvu*7U0lbd)f32fbvDnGal*XeYGD$M 
zK6_hFQ<1$)$64RLAdXs2bUUA9?Eq-0WrN|9u8mD`|M#AE=4Y>*u;<(Tq*&f z+tU-vp~V~dKl&C!PJeC()1*HV9TYi!`*c6EOFraLz4Ns`t(K1Ro?4-ZaOX)a;PvXu4(f~CkHO+OTi zsv8lsK?-rGk0d4WpG_*v&+a8F4i?IGvsV_6i^L+5et4`s#Tlu0v6{qKTfgQIDw;bT zV1q_Ux8L3q+Zd1aK;5oNfzPd##h-hx=RJHBiaU}XMIokg2xBdnC3X`y)C-bk5?xJ%%h@5 zr^0J7FdkxikM+m$nvb4KLjP%T{*Fb0s|z|7<%q|2Ga2usnm>8Y=<-*chB=d9anpLf zS1Yty?wtTO#*?HjxyS6eh!jEB=NC}3^}1J-=b3;f!=I&82K~*wP(F<0ske6b=OrB; z!6pO+#4lYzYflVw`0;Jl-ZGcw}%SA=_-F*O~hEQCpTsxroej?aP4EfzvYZ?KaV9 zjk-4kSbv8_hbriEx|(FZh$vo`FIQ3<>H$T_?Jn;#7%2&;h2OAU zc@r3EnO9o*D{ZnnHH5ox~ecS5$nt_r*m<%AmL^UJ;SNJVMki z$=uy$)5(8sV6YG2e=X~up;$6+j*i7y3m$L!<`p>Uy(GZauTw%G_nCI2q@>j2(>axc zf`+kf2!v}tgDGJl%F{}v9&z05K7N0f$;$Uriq4z*L;FvKnPtD&9IK>#y6Q^c{WVVV zGVEr<)|sDyhj3JTeqkc_*-`Ia4)wz1CP_)~NCY^i%DL&qR)jAm$_nl6T_RxkSGvD{ z#4Kcgvhm!qcdC<``(-Zd1!mf!a>*tmqmK=TMn^f5Q@9z^8VWNr%fhJM^v<;|mO6)u zkk_95-g$C$F0OckQU2XGi+Fh~UlY0PSAFMIQ4on-rcRW;Uf27tfPRtX2cuame;C~66{^n8<=iZISN=Hgq z>Jd?D=qD@tW+`)|XzXcbm-p5iQ1s$H3_}TbLGOY9a`N;^Rv%V7_`JJI54`mf@Bgn% zH7+=c$eslAe}>mV$zUUj6dL zYZMfeL<$OMHJlneF|s&p2>yXMUqha!$m`&orJ&%YP`{|E|Il<{)VAPAUkzbbwQqF^ z6DijjhcPPX5QFHQR@05Tcbi$U@uDGbp(bqNjvMqLBvSPD1cTaV0qAMGk|X4_+Bg+! 
zY^JW6hlajORo632NfK=fWM!)NK-ub&{*T@|LvzcvM4yCF=eDJs$={+j##O1d=5C&u17)D9oGYSu@i==mHWFH2-|| zKm~&#s4%lb|9deMfW*{4mO%k!fHN&~ojiWwzvs&YNOW@k^I1Iv0l{=<=fnfyoW z{-2lR49dN)smQ)iu2GxqP_DVfGS3$~Xj3@vZRBm^>4#F5ZAYcdyfZlDTrv~Xtf_z= zWR};5F4>oOxrcdXXCN0xhm9QR1H&+iesl=VEVVH9RLiSLZ`DFMt@F)pov#*lN zdI#L9vlmTCJQb!AM@7xRnYmQ$oENK%PK+eC)9vdPFWPTZQPE~=<;WO>9*o{)>gN#3 zo!X!4^CTHJCaz!VIt{JmOy`k!>tECVGNJombtbgX%B%HN`_90T5|uSTXj0WE z{5YwW;^nm!t9?xxIO@Dc&#jNn%hfC{A_l5+J|c*q(Zw-=5|oS2{6Qh>gP zYQelK-nR{ehE{$UQ`llH`Sw2VCIzbsg*TWpf>OUK}IT^c7XzzmQTVc=*9_ z{C3z_lFtSc0l9Q_X~5*;DfMWRBjqc$Y+e~l^Vo2{3w5Rm^ngOTLU3v6Qxk+MiFa?wuy2n!~s1g0+ zo{;51;Hbi{C8O$mIR`FVH-3i~U%;0kT)j4rcOyDX?<0@A!&=T#?sZ$V_Pu*4Z1Zw_ zXnRicj+!wyg#6qpudY^aaik(ufigJ*7z3ar6A|@nCN8=rh|t8HI=iJK?fe*(@Ol*4 zNylXQB|6V*zBfznHec5@ziyfx`K9;_ZVw_)*prB=rY~p^0;v3e?RXXD?@Y}EW}pt| zW7pt-lP5z}w6(+L&e)5z#RPxOqE}Sqy?nbFPrl_|RHIDZ>Qp%WI;(r1pmzoTq`U)p z+#|{ujqBd}_=UMyfESz6Vof4ls_*>N62z>M{0O>-L2PnH+VKEw)wv2GPd=@3Cm5JN zG&VgOJb5`EP7FO9X~wHV4T1Wyzq`l-K52%+c8=c&s^#lC30$JV04pzkm^DKYc3MQG zKL5cFb~z|~;(>hZd#2xy;q}YN_Y4*WoR?JPFcQ`;4(~mgIt8)lEP^Y~l0~lQZKK%G9FqDsJ zpV4>zW%R#{{#T;^H68z&j{jfUXq9pm2Z#LFGiHxC&OJyTs`e=2kafCiDF*emJP+K_ zb>Pvi+jHn$VP1}*24VkSj+aR#nF2Z;?;I-ka5)*va$pO1_YTgL14OEmA|@)l0l=?P zu?A25^4A!ZegAVv@RceBgbfzTt@tjGf$Lo;m%PC-EpEOf4B_L#?St2WqrH(e^!cbT zr0%F|G8Keo!f7*d-=Y7>1xk8`bulN9=Jgi^Tle3cl!ED1h;AR^87GKheAHn z@h@j|Zf3CvX}xPjjLR3CxpYb$`BpnAuhf1t)~GrENN_ zNOUIH55?0RlU8`g?R8%-Zx{#jcAvUqsCK@dklp)~rK!)467`TwX!WPg`ruFfxqmq| zOzBFA_X6#65SskBbxooo@ket6|JX-L>VrCLDDS5nO5P85f`lt!kh-mp^BA_`zz_a3 zS!g-i2dl#F6H!fJJU8ml=FxI4GY{-8-atti_)Jzar*nUEM%=&QE?V7mtRci&udh(h zo$ZbSd9};TmAE`hs9RM#sAm=LsCKt5O(}^Wj~8tSrL7Wdlb9$I#}jaMs00O%?uLqq zNR2xYYb(3ko1MF5Sj)`dWA`E`YkQM3tlaJ3{?zo>*iho9zq;KYDGy$$ZLa`f&y~Je zBrDj}q{OXBK2&JkR=cxJJ!a;4x`FREmgR6&h-fqFolQsUI>(cJOXp5B!Dx!zN z4-nhYCSxwEqI3R?+Js&@uLPMp<&}=#A4)lsMB8OvtO~7%J~dc+9F8w{+okR+nS5G5 zmL&BsJXU7Y&2^l9+?hKep)dEQYtm5lx_bWmRE7tu1`eGeQIIx3vMp$`f`0!j(N?lg9_?fBoWzpL>+^{-JoA>y|8zx#rkIM&PO3U 
z4LQbzuwbAOEt2P@rzI1Ojo?S?p|^);Qw>*cRoIlwabgYA>dHzt(_bDlSEs%)rMDIZ z^S0wvC4Zn<68#imJ4&cTN|MS2satW7nP(aTnIvDx{BT{hk_>>1?&L=T^48CY$iXj4B~Iod>UAb? zq8ai5%4?T?eH)9@M^q0i>RxUpk7^)Jm|4$?=Q0qdzAiqMad~GEOvs~KcH-C?S`T|D z#vjP2*Qr!f;sU8lr}|RhgszY7SxOo-KauNSX>KR zR4W7(Mi!!5$_wz8eWq;v{jq?gsL|U#2>DU4m|l~1{PM?bVT1!CSJ2~DiH@T>Pl`Bh z1=@LhmczFynor1IGp+Nr2__9qMpJJ*#yC`{Ft=MA*pi0U!jKhtMxhjSD|6W?(ITZQ zGM|B_#xp^-ld<5_!-n zNl#`}vc*eij-$93MJ$ADl<_0=Bjdh4rtEdj4L)CvEpM%v;uC z_ffv-lIBYR?4|i*>GVgtl=X;xGCHfc`JtV~mT; zyOK1VI$>PPlR1&AY9Y%@EWDfY4>xmVoal%oZU%Mx?`#{pN2O(UTkFUlxce4xe%=PA zi|^craNLHqgW(SRc$_Qt{CcmRwkHyk+7W0w>2RfYBWQ22l~k2bY!7&)F+|gI!L2^W zs7z@V$%2S>@e&AItd<Q9|OzIS&cERT8Z{Af?<(b<5v+8R$215?M;DQiRCS9u{Do)gl-+(mCD zH@bOe&ni=HF_(XT=I}77bN+|DcpeRpck&X4v|(L*316bF`**A% zGao6}lCs^1f9o)$7l1)rl0{FX1M?S^B6ByYzNchTeSL>+xG$x)D!FyKFq1I9Xcd;SE%&|OH)l% zJFxl%q^;PbOB+9MtULN#a^5#h+-5U}6zPg9ydShE2A_BTbT?}r*ahh}nykUuEhg_C zH3>X9OfxLg!F%hW~uk8NAN#au7q*CEb> zw}KYBZVH*Bf2=vYtiC&x?VVU^Uu``;OZ?D*3QrsMygza*=*juug-HRRlirDiWu$l^Y$sqZGJXs=QpkI~R;# z$aL=Hddn(i+`G&bao0V|!!tIbm(XbfENBH=@15nUFIos=RclyWG_#TGgLA z`+By4@ZbrFt?|XzJocnU1uXZm0j=`xA6P!b3&(DURwl(#d{oc zjan#JMELkmDS9bQob_;Id9q1|P^l*f>vl1=xaZY0ddVzEW|kv%k`Y%AETFs{Nsitt zk%-179)b-~TpTS)!>E+~jix=`&@B3pz$hKNRA@N{K|q*PmM_ss-|JT{cC+i@!&Pr6 zExjlfKs#!@5(!-kc~?=LQF9j=zJuSAk#^ykpqU?>PGmgTFlFuL)bgR=TLXuE$m)_8 z`rc8kE=}rPx|YV!o!c}uk@rKkhQs4F(yT|7*(VIW zALlu?t8O!oQsYj2jnzn+OPmm!yFlJj$RA9$@VCD*O-O~sH%TjT9u;uC9ObH!xr)PU zaccEJ{a@9%hUIY#T8Lh9U|{*+&4XFVqZBWuyl7R9ePj3px$5~-&I;nN8+#-k;8 zZbBqvH?mbgiR_JD6eH@&ypia?)#$UZcAC`}UWP#m zKD_su9x9uFs6a9rq?3o>lBRuC_MxuBod-WY}Xd^UCV&B`f}@Qhu-v$pzGLTCxTk*ng$dup(;s7rjvoHeo}6Mr_=9tl6tfq zR$1FCu|tx~Jydk8eUIL?`AON{MeNOToiS53s9bU?Hre2elc|W{@vLY`s*UHJ2_j}? zB)*e>bK8_BW4mimHsxETtTQrb;bdg(eCm4lQsigl^(8L8`~tnxxp+(U&`auA5MeRm6hEvart^l{Vj4r9Gswjsb;8olm=0rD|SV{jtUn=-kRK+Tp4pl z#49}#iHRrBQ1#=9kXF7vmlkZ>4X;$|m5Iw@E0=>zNJU2P@ zNIBLg18Wc|!SsmsII?}6*_a$kbHGXX#^`hmZ1F{xYXsLZ9`6FAdV923g^`KxV0vO! 
zh_21bs3S|w+tj6CUdWB4t>syCHEK14j@aGdo>!OR){j|RZG6gNQ|o!K8$niNua)tL zb${%oLf#uog|+h&EYXibzs#%TyZc^NUen!4mN*>=^JcGGP>7VEsdXDncXieqemvTy zjXm$C(-L#4@Phf+$Z$3Pk3!13lXKfPVX-d~SjF9G4j#pe@{3)|$ZOZ`b!)t=X(9BA zY|$Y*JKu>Nxl#1W?XFTwM@nYvk#66!Yx7vdB2pZIfbiZEO3&pdI!}$fZNoiI{*V2v z+j_rn5LIwb*G|5H>?Mr6y=x^r1ScNi5YmjDScE4jSUj8KR8A>1$JOPYh{mWT@Ew)5 zZgXsofg9#$ZxR$D>_i4lXE|j|>3NjeRql0*M`G)`6Pe!0;moi?Xt3XoG?d_p|7k6 z&#PLKT0mapS%<$1R%#b(pQN5jaJI1e0^$ia59^>c(R)^xdmCR5stv6Byfu@svJjvr zI!*X~e7qo&VT-IMe_N2Z$v8-Mo2GL`7PnU=3duf~6HEI{W#EBI(hSEkZztF3t~NfB zo6)FGIO}7xpaqj~35=OZEm4(MY-?h>a@lukHrLOJ+1P6ohFA*-P@64#>>>*#xQEdZ z)$MamelsFqz6MU|&NNBqF)9d}bjBnX*mMZ(8hbfsV9SA5RxFQN)fPlwpcn`ZMB8(*)2*v{dz2liNrxFIFRh0tSll(Uoyp(O%7@|s1J)BnOa|MS1Q#qYK|s( zp^4@_XpN~D3SFVXRslH693+p47oez)fm_4h!`eDb9jsyuFIw~<88DGfe zMjxkClpfOqi=y^ERTv*z=}s7XJ1@#p3>?P2yVzGkHrQh{R4r-l@iP7|JOm6I?*Y%!#E3RE3P7$v**FV3yYxCI1C)o}ZuDA7#P7c6>@HiUM} zug6)L5{IAA5~rpkd-+dhsx7*lAvoieEfi1UTS=}e8$8z9hn7?K@Za5BW^P?n)cueX zjkcdJZFk&qzxG=G(i^d|t}us?Le165?odpf$Eqbk&3i9;5raS?*PjRN);$G#qR!Eg z3U%pr{gm*O$-U8X#Q2#JdStrY?pcAqASuvMb$$gG(&ZB2Pn};o^l(BkPn+b^qkJ#! 
z)NE;H1^sv2iH)Q;JZ?ItzHFiPCe;YNUW2c^*GsVH%N!#&WU|6Unog0di*HIZ0xxp4 zKDx{mzL~UlWIZ5ht*IVe6DMZVjdAdu-J*2qMr5T~9hY>5nJ&3xJ6wsD!duC<>IVbJ;l*jWp_N(8Au-K+J&!qB=!XPfq*%^rVlhpx5m8ORmWG?aLf)Yo__~3yx?@lSiQ&)KtXe)JjhpOgz=H zAtA_BFqF1bJ@g|@dvwA4uK%mPP1)sR0aQB$vXO%L`8YzZu@7nLxoV)gnE8SEA~eJ7 z{N}E_)EYxdd6PH^fqJqX_uNZ)MsBOTQ@{fyh6IP>z7228zL~NXb1kQ9*7%R08_o2S zzmVc0CFA3Ut@k9xBlGyhFhX0SjM-%{hl(2!k6R_JmSW~L4lVCSyTvAL@;sF`DI6O< zX)ll1-y;5&x|-Lep?iy$R&VQ#e0Z9+tGTk78_kE$_UC)=w8TQTGOJB7LMWMrVZovNhiR7&eB!;IYX1CZ zyDnZrt|Y52`mnXV4vXuRJCQu3avpQ%t1B0J&${KrDtKI(De>^^bdG#Zxq6j~9Ol`p z)^-F(taLYtTWIExI!%-v`Yx>e(TRp!Fu(Z5CR`dn{X;MsD+fH7?5&f?oL#J;22UIB zQP60T*}?e5N!d7dm2XHoJ}oo=Em>mcOz*d$JwVfPcH4%v-C z%=dgdGm~jQFuS!DP|4FkBiBQ>KkqVnd14eNL_3eACh2 z%VRH%&yam+Za?i)Y^B}se6w(b@ykf!deCyQ>kHSdgh^lBm1n~#5iY|(85AJO&skfi z_(0IF7qYzqr}YMt>^?rpyuAAjF&nMlk3pb_dq`}v?uuDMP_`dTD~U^TZ$M##$!qhW#K0#iZ$popXAm0oSe0&6#cYhXc5|x& ztzD>KOMH|?AOI9kGu2O#n%OcNt?1WWvAvH zu8g?F%<{PL^3KW?Qtu=q^INj;(IFg@ilWxj9SLJS@CgfhJ)(n(RcOnIrOnins;!tz zej;F57dYa+KRs)g(8N95duAiKE~&sSq1bgzUTD{QXz#=>1210a<(-P^edUOBMDI*V zsd*A`GPQeBi1h8i|B=VK-X~twV2>t-@=)XUX{fm@!jhAE8|hG4|^+ zuL|~BTqlDVxoFeQ63*M2qQ}~vkGfE*KdLa5{-WIC38dRvLz>oYaV$5{bLDBJd>JX< zyvJG8u3p<2hEr%zT}nH)8CiwOs5w?Z2T@*h?H_vo4uWb#n61iVHJ6tjJCL><3WSgr zMJe%dVeZq#Ep?{tOvhr}V&-D*GQ@)I_T9(&DdR6%Qt7t!}kdj*XQ z(R$xYTiYsUaQDff(`oa~z~Y3NMx}P@ozTwR_6Zq+@O7FndbY&4cv_)j_1e}efexq+ zdrYxJrPX9EzDc8rYg;E09AA`%7UmW36CLFxyVqo9-6pEeP*mWn=m--Mzte=-ICefy*ut~*x*w|S*v_c{oKpcBGtNMfN7C|a%aCr zzb+Z)@b+Q#=X{T@wsppb64HiUbe(m)PSo(|nnIN4-AuddvHmVlrRe)Pq+&CmIBqpe5*b~DZ5x)78@8cebDY@U)j zF5jtMCO@0e^YLrz*K4{~hFa^|w~DG?(aFCw;fZ)I(ISkGLv~C``goWu_qEoL8LEW6 z^y_7?Z0@TU$w${_r_AJ8zb}kKYrV(3KjF=f@i5R6)t9tTpvakcHexD~#_^T(SSSBT znLJ)&+f%5}_}U5V(;%vyoVru$l)l}!j~-blyJ8~@Ch0i7^$m&c^7*c48lOS=T)CyU$J=4u0?O2d z4c}0E7Lo1}Rmt-ysob(}gc7$tACnBVv{$;PHK)KGB{VOw%@?hpu=nuB48PW0k1f6z zH>S%wZn?MdG-dL5N?fH$_|o?UgGlH*MrVe_UD`btBgI%5akW?NYVRPghmS50sNfVO 
z(#cL|@}dTUT8ZD(4c5$T;nCsVb!t3U;SmDaTjwQ8K4t@03@w6wcoFr4p!@x>N@1_~3e%+C_0cj@hlgW6M>Dy-dcB{VR*S?qbjS}z z6nM->jh)^F8*F%Eq$5NCiH#~|ndB;q;`=dCQhnR>TP+-dBUk@OOdRz?E*I^+6`*%r zooZjV-#LvSZy1a)cx{DL`139tEX~C23Jk+YIV#rqgI3wC8@*}jQ8w6>COe}kuH^-s{%<`|~BZBPv+ zpRwAz=jsx2-4>gd9k*iJuaI17=k;#G!J4vmtATyZaP{k%?#q#i9lGc!%jH+$bxxCW z)OwDC<&LQ`Vvd{Wr*RqOip6dwizMmMyb6}&7#`1>L}1=m2I}9N*G-N};mG2)Q~6mQ zb3QsX77U$9GT17f^8=|AOVyg~@k%9pBVX1x z3t8~1KHHQ{cTJNJs+-v1QrMD6c*22|fe&c8?7b|3c3_9uXS=hoxPj~LtIFhu<8AE{ zVjp(|I9o?&raNnDN}L#y!b0=DP}d zZ?&!$S+?81{<3HH$a&Ar7sa#6wA>txHu{VvGswBZ?D2rgb&mFC%wrJ=0_YXHV! ze)yH!ey5f8@!9Ag5qmLVva(1yDL?BKa;inMC6NADo)YPv_M)S?D)R1n^b6x2L79~( z#EN4=!Webp{qp898(AkrekOnAR0q7mUBzm8HZEhfX|=^Fj8lozGs{obU{IYG)iK32 z*1q0KoTvdOyAREy5*WMDgMNJOVeK5t>9{Gb^+|GTbn(M4sqo2LDjwX~zEl;L>+BO` zQ{57(Lez05J9esJ6C#_3;dI0)#*p3%L(v6@e3Pm%efg>?TNx4WrHW||x4E`>@PUNV zs-qj}t7~DR02UHxca#{$G$TFH_;h9FLVkitM%asLaAXr?EH&Da}KlM0W3>!Ko zq^0A;UohUJor$a43v{0_H8sn~X$sJ+tMa%~q+qbH(yHcR*P?mIA;B@Ytb9;ti|p8$ zhPAtl@;x{2Mws#-3C=6qF5f0p%>j{m;UTk8a5e@RXrZ{$tWaUQ?ZK~>xv5MnASio5 zHDf^pHH7|w^^;my^Pbf{Zjj=l3*38M%}e)=_gOtzOJz@D)qoHBb(f98vp79H>LRWUF5Sh zLsRRHOgO3I3(3JF$ZJ-c8CvVXbb)l3H3Z1m>tP;Gqmv6VMq_A@`a#iAZ8-DYo41_f zRDB7PGE1`=WdWfVa5Dy_vM01-Zn?W9SNLwVzreX{`L37xlYD#9>1D_DObwy6Ww{00 znk5s7pXs&n5q5lAV=wJuD7P7V-9QmwS-YtHSZRXLe7N1$@4_q%AESy07zibAWf$DwWnt3ez;zXpLF5beRvq{HxcZ( z^5T?DNBr>In79L8hL}^-8@o-&-xkNxZ4)HtP<6^(v4nZwpivPWME0g{D|s`>a&RE3 zo>Nk3>jSEUWMwnnhy*>P?o)@BYk2$*I|r~iOgR3lE<_Ky%2d*Plf zx|qJ~8$p3sfsFA>TpyzM97YQGT(p|Qe!RQBNzKg{{XG41Fq^Qfp!kg76gX#~RjjmM zx*gt0W75fk4NRn=V-pd;XY4jE8kwFS1&7ytROU7BAiLYy9?lN=0S;&u^^b42*wIJ3 z2{FjEcI3O3ZR@B9Fm8;pVM6x#b( z`tK0Rh4=-eWZd7v)|J~-+#ci@yP{_X*0~miG~;#3h9n_41tS%MLoRH)$z0Nvsxd6B zthhxkMKZo2Y-r=P!f2Afs@A5VH0WcL1HB`?pu1r{Hq5KT#~2mNw7l-{qgybzW=U-{ zzGM)0xW?oynx&yMJ#Eo-zCUP>TYY<~!?EWE+91)A-~7O_AhghjD#C6`-c`#7lDP0z zp&Plk7I&k38N5JtGaqS}FBpvGsN~{romp(n(#wz@NE@7bJ`~zFtl?QDuoQV+R<6#m zAvLSaDr!SU43BKTRI_+e-f<=8W$z;l&t`7pwg+gc+`c#&=8MLC#e8cB`b^E{>_O&V 
zdh|@d%XpJ8OyH`Bj$W|la-BUXRXV=*)rhQ7t224I+_bD1J6h8-EFnxb+K_HF{Vou8 zswZi*WS-u^cxBQ;DNxJzS`dVhLS$xmtK6TQWU z+-a|6yF=2QOikJL2 z4@EZ(=oj0uh_8%&DHnoXD(q>7-qemgr?kRh?=$!QCQU*qmp&tJQ4x`!leZNF|b?)M@+*{!{0VsTLY;N1m6U0u_5PWAUj`%u1?fvB4;Hzy-0 zeH#7NR69<0MKjE}!eRusj;VJ~OOAEFjWO2Gri5 zjShJ>G8^F*ccWA_Zm?q&<&SIIxZiE(qKN)1`ekq=@Q_3d9q4~h$T#SHhT)Nk@=$gF zg{`qSIgAk4d8#j*%h@^xKB6AodTZIYKOSXnK3?FqcuNYs=k0LBD5tAstydTHe| zWhMfD>?wsl*zueDiY-W6k;c!vK@T^F=@f z;5VYJYb(2NKx>6x-sOb90GKLN+wm5Y-APLu)y~@p%dpua&_2+*e4}i`f>Rcfgh4fW z7;PxAEVr7flEPG;g4|i~G^@!a3cvH9E=y;~DtF{KuQp<4?ER=$S+=c)U_yC|sbK3^ zzbeb}@E7fIxV{D$BG*&m7L2JG1!DWUQ??hAUjmL8%oQ&2LZC{kPzWps)ar9}dzhf` zbEg4BtGM7`t08D)D@qq#sD~2+MCrLM)GLAqUQ!kpK~vie&`pkw>t|{PFL$19p6&rt z>3Q~E0ScECRjGG~4P`tIund4fNl!5<6=2Gf@1p5y1o2fmml`oD>icUrb$Ik5h2O{h zH5hTo1%mcC@Uj_5J{*kVH<&U(7eU>}&sH-41P}}w3fR;P9$}=w1f*;@=2!qTk1(m! z$AZpv4k>!jpPmXj!;y5Y1@VA!@r1 zstNl3bHLmR(o@XgFfjX=qosgw>1S(3KU!b`g^j%fLn)vY0RHA|c2^L76HLG;^=*n0 zV6+~9z5UDRe;NI+ME`3#{xu!{=WO&az-;}3os|p}%sRTRzOWYS4Lr+>j|VQIB2t_e zAf5m!s_dYaGyLx1a=b7(XO9%^UsB?|(jsIt(Hy~!{nEq?XFKUrkE*<1ajV4XlHKE? 
z>5;4@qg4$XV9%)zmy@l8U+y65@^+@~t#249QZ1Y^IS*it&rabPiQTQKLS%=t@){s4 zD6fAH@SovDdpNlOy$r4E#sS~TZY8fl$kB|s*Oj+^?8NpQxe;oBs9Ukj9=)jyTqfU{ zX{1D7&v0UQz9Tc(eAFmjdAA4st|;hWt{9vjnVbyWlVIyq zSiS1HHuLq-#w+`Z;{d3K8y?OUtDHIRJ7XLZ5z=4?84UKBlo894*IfL(INbO6$9Mvl z1my)V=UaRaz6J32Wh5=-=8|e@d{ZoO(q%_9-)n&offuh_;J90^RZ`@I43t{Yg?*>G z!n17gW(13A1XKVF)50FYgt8G97F*XgNqoI8s|!i zX)8G6Rn|MacpMMo0=|n=Tfuga1dH1yCUJcs>^%wW{FJBOVpt&Ll%W21JjXI->7=}h zf@QzWT$iGHB0PoE?N18iYIGCDEcxQ4x z1J3Ard3p8K`%yYOgdI74oAw;@vW3cCDsjaDKs9@}84&>an1tahi9R;p5|-fGB6W|3 zk$VD}=VOs#0bF_ohJ2zyoKx0cH}S-$b)q0bY!p#aZA&VW z7K0(HJLJzDm3V9KP^$lZhfd8CxjKmqV*`jYmT;cZDz?3gEnmIJ!M23OKEGEWOfi9w zONipvWKB>E_9Nb_qGRMKfS`1PO{fGaWqxyToD=QoWicunn1$!+n*v}g;^j!S2WT2} zR=cMwxR8YY5HPn;6r^9fOu^el3z0R^f8?W+iCx_N9?h7=)0u>QHf1OTA{U)g5ZUYG zj}iRyOBe1!n1(K5n73IH09JrJn-?rKSdA3l%&k=Jf>LMHmuMbQ};6kg89%ZFZ@IqT})=Wc#C z1nRd!*KD#n*{p~ijjnrT!Qtl&K`l?Z>{d8WXSt8rD@c`J{hX2yFF(%fm5xTm32?NRdL57Htf9!h@Z}}FPF3}C2|Y8 z51_YmxkG2B#Z7J=fX>41c9tkiCn7(lIh$Jpq;oPmq{1T$w!5^%m6EY2Q)H^-PXTR# zO<>ffi9g2DpKz9FTQ*+XqC3a6teKaX$W0*cH3`*R*>G7sPvN&iMV;vJmNa(*v0|X$ zG;!H?WJSpGIe-PK9QPX=>lv0OZxfI&cO3Q+)Es`nVjNkuk`IzMXRd4kff^FkiNWCP z?+qdUadpA3=-U2omp%Lh5sQG}QdG5PAKD6Z`_GSDkRFs>i+1Jzucsi+2L1C62VyOM ze*Cu|_tHTG`m!?T*gn+zKZE}DI!0yo>xn87?n}l5YsqTxN54O)$6w>Abo>UQ zl=XLXXbJiy0paBh2PsaHLC7TYUlalDX2X5_t*fnq>zsavXn`C6t3LuG(_Xg>`9+KZ z8US#oY?TTw41V1p=+)3WE%)sA+VJmy{q6fAE(Tiwq4J_<569nDO}~!-r2-&TaPHc9 zA;R@0`TWgJo#TMO>PIlx-=VRiunNu$qASF*r|WhRPg|z_+&1Z zA~N|hc4d-P7e!|M~&u6?{ zmLL(0qf=Rhp9AO|_q6bXl)u@YTn&a<=?_ih*AW13MZ8l*{n`)+?DQ#s!@WDmE(xsw ziGj0_MZh1X0jxDRd|JH&Bp43(Cn53YGp2h*fXD4r$sRCH05g=ob~(`RH$!zy!7v`S z$WG>yk>IV%eaVPFL|+0ix>2}rki%~T@S!5A@ceJ_fWW$e?DcrOmxRJ$U^X%Ae?Eg3 z8v>Zd6tqhL8oGb+$0>n^KO%549}H7nk~$4P41^JtX4A#?o2cIvu&Kg^Zq(0JmH;0{ z)D8K5Zv_R=R5)?A`X&bi_6W?T?cndvAh038V+Uu*J#GjRyz~>;{9E#0fZP%_HuTjQ z_6(%Uh-*?abiawJ2m(qkMdba}c-;XXmeO*Ves2YZp9`3Si>b(S3g`zQS(bmqf&yv> zcwAONAsL3@-_I0jTh9F9_m_Q0vcuwxEc6n`AabJ?YVJqB`n;reJGFGIO&DPyzWz;HJ@6CBsqsqaH)sO 
zyzAOl^!69D-wjWY62k@@?$~@or?+hql?Tc7ms+>G*UUfl>b~MA7g&P<$r9#J1dC zdnI#k^EqR-M8`S)2k?EgDlSgJ1S}psx=l&iydk8On6q8>8^-8j;t0VR$H4m1gV_wt zpVJa8g#<1u6YD#VyNA3US1_-qSYI;A4~iwa!yi-aOq&kmE!}#keV_iWRjb5s>BUkj z`j>%BC$oUPPYXyL{^gG_!B9Co%57F~ZZIP$lV0~DDM<;J4>*)lDXpBcv8ar%5&B?! zzLqT5ik>DNvq3KA*V^R7x=-7=X86}_fG3NUNZa{HF6z$v!yL^v)HsxKNjhufWb+yT zCG~9+2tPUwb;&b)cT<`$lm|h$gzS;M@SW*pvwYMP^BxUP`3=q3b9`>`h4p`daf95j zE(d6d-C@E_#*03tZaE?*PVOedE46FdNnFf*WeV*#*`RsArfjFW%Us}C4wvFfCdG|yI}=`~PIcBO>W>4Up}{_YvWCa)mgmiS znH#6&kJwjw+_DQ&H9psot)L3Q)!7d+|7b8Q!@8-!Vp^=$&RJObjh$jD&8eu~AtYlQ zhSYMAOJTh0&)(^on&!|zqJbES-H3kA_e;|SSuS%dxAQmbQ6S8IRE?kT!1Rgl z!_LsWy-rXVr>y3hfl=P1@xCWUn+^}L;Fy85BzbkPG_`xsnX1FfKabf>h-hl50? zS^tHL>YkPtbn3A_#}w{`dlD99Ryf4xRVnRUS)!FMW#1E zhi7Y?^i(>vr-@}fc`J6?CIhQn>R3}bHJ5$f9o~ElbUC@jdy`sd0%ELOV2~SN`FRGf z9RDo;a5Fz1b(VA*AsKd5>1xf&%Em?7L*)qK+~ioHQ_UWEVD0kgG-#sRN>+NtbNK$` zL4SgFVqR#{Ip(1(9@yEC4*A^tW`6tonO_cYh5qRWJoo+X{YYov7Ycx1VY`gU29_AorIryl1Bi{ay3 zBPS=-f+RAuC$+W$wt`dn^FRz|l6`XgcSv@FJu~J#pxITK1!~@!? 
zDjdJVq|8aZpXAS8QAnSagBk!%8&p^y{OyT&ukecDHB?Mp3A-io6Wb1NtU%MIZCmP) zbi+IH&~7mVw-DrEQ|WP0UIbv0`Eo(OCk@x0MNorGt;b9EUL0E6PXL1I7nD~p$g9Lv zMWk#+HgSH5VMSH#bQd>RdVmyV|4Kp8U-lK;;=`XzS<5-8NT~kiy6Z{yS9B$#^4_C2 zW^-MURjahby`H%um^V2FHg#$x%+CAVs_udTjkl^y*vB||tw?HblXHfhL#|=cFrNxN za-DfS6(zm>(2ylTQC&zKSXP~9Y>{4GJ3FF(0zCa6Lqp+66O(P#M^vgFc) zns41$snjY%m_UgoJGj;ioDbbHYy<_F{}{vR?&L0nt`e z%8EX>FpRF>7Lv65{TMobal##u!B;ypj*q-es_Ff0F`D!7xx3wP0 zrKi2ehL7AseyIFoAgYb#m1^=8kjR|A9{2vYKc`~5CmkQxAl~5GU2u=OFLSyrK}R5B!(D%|16;SOB5mWf~aj)VgYCdRWz4Q_@z#{E3G$BT=0{A*XvU zX5Y(;f2U~k{k+wf8@$P!SqQw$mp-e7UtUJ#18il#)pALl=jA2k8-8j3=7*FgmgNWK z6qSMX!@gXbsTq84X@4tP0IyO06AUDO_M@A(0BGIFv(2yiu=-Mh-t^)7;bZ4@>QNOW zU)Mp4=uF$Z+S5*l;^Z$qax2iEMqnb2hiifqsXa{lPw8gjWcZDG8SRSba70)tooBx% zz^%M^tm6^D)u(l6d9ruib)M%s=z~iw&9h1z8!9kYwLe|v(Hdm~U=e=Un0ecgbVSm+ z@9WFbV))>Ik=Z@as#LQcu=r^?Ok!yavVU*Iu-|bj7>vqWqPR2OHyHTh`1-R#gC8Q{ zj{=xfRKUd}uwQ%!@5}(X5geEfsdZ)nsYXelLiD z%OHq>#{k@(kQrcAlgfK?q}2;ou}AG-c%V^6D;~c-4em#K2Z}*s?Hg+hkJ6%9o8?^V zqxGgN5>_ImCoXSKClT_g9&Bx9=W}HHzMry6k$JD-1VhEW*5-l(jDy5@P|;GR*ELMY zjX6+LLhrW6CM@(4M_Y$Iu9}+dam-$32WHJ8hr>thmqX_~(ky|e`)ZqCmjdiko!DK{ z?8K8&g-FDlq^|@q@T-POg6uf!ehhY=viPxSMNHz>jysvuJ#xGAAW>KEyck<}t>)Y? zgsJp?EXF}8@(h6X)U{{@2K6hxlXmbCcWS&s_N|?=ijaEX@WVyi14kUd53qPKdeiBE)Ufm$f+AzrDXgnx@Tl^RTW_v#`!XINEJ_x z&mpXRo%eCwtpIlg`b8hQ`22c@{+T8VF|RF)XwO$!o zcOmmvo|1b0104mx4n27GKQ2RMgPnWypWD>_Y%TarWfHLMWfGL)kLUaOTlxM)t0~Vy z{&CT2@(M5mU$h|7b`g1)M$ozgD(f>CKUV?<`hA%C}e~-zF2L()Az-e*n|DL1Y7m%+BD^pSY zS<<~Sl{(+uxylD~y;*MVPdC6taE}4^4zWHL`=jjTpt;^XJ28aX&kcC4VL|Pxzp-i! 
zJc^+OmDM0nw_&{0|Mz;${yl!w3QC|<3c4DUqW-o||JIKFE#dxK!u_{|`)>*Nw=Mqv z#z{(O{r<(!&yEQ9f@A8wkqYM;a;RrIJ<%GA;$-&(8PEUy7*PLOa%$>@^|iH!<~*`a zX+t$$?suEDS(XJyT1nu%yJlTGPFmA$urNKc;~+R_nFGN<8~ksc&FOGB#a=qXC(Hi1 zBcHN${brDt^~Clx`b;}cOc)+*CX1>i`bOiOG3p4Gpc z{B@?wEGwx{rEj&gMFD4zhlc^r$b;$S(`OPdpC-}tx@7=3jiibRM63ICD=47Ts&Bf#d;Pc()dZd3Ba#hk{ zAJ6-8Lcb|#J=tVdQ31hC8|7~@5SR#U8vWK|ui(!qr~WVY-ZUP{_Wv8tG%`qbvM(jF zh7ih9Ns&;PtdkT)C~J08QIVyxFOA5)jL6Q6l*lg2*iEvpBg+iN%zaGPb$!3r@BiR_ z-Ov6{eviz{%z2*2c^t=Qe}CQw)BsF?f*;+2)_9Jo#i2T4fFJ-{ zUVdX`=wlXJP!=axvA>9I_1yovVuIW^>wJpU%KSEF)iutCgV48_RvxoT+WN0w^Vfpi z0%cfty5tl8n$kZqd9MPDd!VUEHvw`GcZuRUJ@dro*Y;gKt&lou-Y9BIOHsd@|U$e4spxDB$@q4JY`$S-c z+xWuVi{BuAzYQBxa;+Vyz3Zu{aY2DLGtZ$~3oGUhni{a}BLi{o;}Ex6d)_;@Y_Ckb z4zhy{aRI9IcHyP_U_j~Pnh*GY@IYY4xvB|GuM2XWs@^n={*gpz8MBw`L?&paIkp#Z z^B^seFFLAeo2pjG_^V|O<7V+fCn@v>9inL;Kt)c!+M=d4U3P)&>F#>4{ONk2oto<0$tb9|b(9sHwBzLW+>$l!pC`YxNOT}|paHK^Quwr_Ox zwX5*XAAo}f(m_AQe^e6{G_vHnC#dOD^!_82G|lPFuK(K_4jr_S!~>!M7h8)G8&kD2 zJ4oxzzHH}hSDS-h>h&{(AlRUM>&|4A@}DiyeOsfZEF+*6lL++>2r$=gyuUNq;FdO0 zns1o?J^8wPeCo%zi#kU=S?y(?m~ zwnyLj>8781rujHA=*(6CBSiEet(~uf8mYb%0(!^rQG zb-Mf8GwI&xud_&JwW^P9J*wfo1|~E6Zg&HjfK=w?H6~&PYGcRSd{FlQke^NJEsZkWml(J|)zPeE9)qzky>*L9 z&2<|Jq3%uAqy}?KK514=)WWw%)lT^BiNs{$Ppd?hKhI&id{Q(fs%CY>eLzdE(U`Gy z3aBkra%_8Um{Ll^w5|))`_2*&fQ{+hfwmL+e0tr$9LN zryl^6;Qyj1-{hfFFBKcr1~lwW*FISR6oj+=Am9i8Pe|4ZXwti9TXMn6081u!E#R-y zQvL>Yi$U2QngI;TJ$t?c&|(niWN1BvWd94#9vT4tL3D0=0RUhnAml7jD;A422e~5i z*zR1AX~^Yv0EyLmv}c|c5}}nX&jM@ANr&_Rqii=<;Pp5BD}YD7p`mK7OVz#vxg2oe zA8vM}4(-QZD#48_&#Z`<*aNd17TMhc3iZ6;qx`EP=s#@f{j@`Aqyf?S-{^TzFwqCv zIGQKHUL7DGd@DbN2_5|#&t46F>x<5L1=#KXgV(VHKenXb3A6(DwNgybKY+{u|1s)Y z*9GdNkp{(QXOl_<_P}P#<9*fNk-OAO@Q>qRnW=TA? 
z1ET}4CLI={9TfpmFHNlm|DmN10+W{mj;i_JiT>Y-{@)$_|F7z}$q9UwjfTpL}r=4<4JH+a5Zxh9a)+7rE}|-Tnf~3l`~$0Gjj;ILv2NEO{ZtAOlx5k;FrbN{)2D z_@W=3hZRzvih4Ly>NyZf=X5c8`|HbJi;YMH4h2*~GyorVG|Vvm-;bQI$eigsHXtPA zFfdZFBp!fu>K5L5D$=o;urq+J?awpK@(_}v(}s6lF997uZDqXxUndm{bdgSTh79aD z$Z|^R+{69QJ1CI(=tyygSd2Df-s-C}E>++sPGTrZK$wuwXDXh&SHTrlF3jnb;6& zu=nrjE8gEF5~q8^+De{=B8Gn`KLn6YfZ{`O9xv2w1o**8j?RmSV%o=3fEl3HTY{0K z^etV}*WcmnL%h!T+)Q{F;BU=4#L7QSSiaxc144yIcZ-4L5CWFN+qv`dI4wJE+M*Qo zBovY3oV{GA-Rkofh4l8|?>e}h3oP#np!%J(c2BqD_1~i=QYIFtyApIE0|f&t4JrpI zfabiPEv=Ka$fm-|`_QsflDcQ@F6mcHd|G@D;59cn?j{H*C;|21^;A_i69Bt_aVIK^ zy$f3PJ0IMpeg*g&;QSe2LTLw)#OY+%fQ{iX&>l9y0kYt?TaP|@Qlu53kei}SA(CW767t~iGToNa6+cItij)>tqp^H zGE63&hWy>`78ANg5QJ%R+diKLw5$1WJHO%+0MvpeeZw{M4Sp&4naKU!h=}oAQJK(~ z3?eqvrtXA|-|0p?$)i27#fVY8?-r%rZ{oJD6W`Y$OxqeKq0lDi8|bz00}juI$sHkp zrw5p*@|ic7t~h^@9;npMN1UHoahq=XA#C>f3s*}s>NqC8h=^|lg%*|hJ= zVOa!h_XQO}fFq&6J}74F?cU*wD_>zsQ~p8OFwzyO|IH)7f&LpT{68J8ArLUdkVH~_ zr%3A~EUV<7V}K^*z2^5qPx7;06V{+i@tQ_m|tHn*~>H*8m(qJN`6i3~}W?03gav_$;r#1y8hgA_B7ib=sAWiGfB1K+&Lpz^?y5eK2ORQj{+B;8~P zRD1bb;QjDqFo89Zlv4XI{qDgVWa{KZ%^^$kbiWYOtzZFN($RTz-7cPHFRgpz*woz9d7LiFMo<5^@`bEJr`i#p^;k$W|L1KZ z?o$V++pQ&du^WRSzcfn91J*pZ7bfPG+jokknO4e)R^7Yj3R5ZQIm>P)K}^~Kh34k* zR>CCY7K02fMB?h=iX}9-!7pv+dcJmdtP{B zVl}Qi@ym18039o9>&~up&2k2Y$U+dLAh?Sb=1)feQhE!M?lkrGO(J%IPsB`UqQP!$ zCJ}M67NXGhDhn3KT~Rmp&d<(Qm)#3ae$GhPTXR=POJ_~dKPhjT^vKAL%bh{{X`{$l z&536Rpk;bG*w7e$m4ep!GZ(cW+SW@4T*TO_8i|ns?3mrf!YItOr=c)Wf@l4a%opZH zlTlsPg1SvZQ8qG9C}W@F6}$#f2%A%U*yxySjRA?dj3WV`$rk91|my%WvtuL+A1EZ;`oT2apdq z@42`r*ZHhOn1iV)Z#Yqkp}@YLBs~4v@S?=Js;dZr#Zd4{zjPolvY^7NJO~0d=Je&+ zM~oX92Z$x})Kt_0ZU>VH?6Y53wW~eS$6j*IzK)7Xna!CMueHFBDu7cnMoz0$J*Eb% zPw9DPpwXgf`%~kXETho6wUi}uD*VRa=Vy$Z5fiu}7hKV{mICi*+=(w_dS+%7`Z3B1 z*|D>BIPJ`%vDDOp#ljTKB2U~@+^@STAM(Wr&!5F4f>(IO-lOmqM65iV%=JhNfK^T;E<#BjK>egYtnA7 zV;Zqj9R|TI`8j*}?Kg~S*NN$0YhPZ#A6xx}VD?*ON1`0o7DduowH{BV+Q-R^jLq%C z$95F&U%+=#g3(k@r*3}{<8+PAt4U|wPT?OoQZsh~;Q%}rX6ujfSLu(A`A!_EqBPhoj@Wf*hoXA_-jdnIP#UDG4F 
zt+f%05~`p=p_ma1Qu}z|#|E;<8pa(kugjHUuW(8xu^$Uhb@}#6u`d_lj!_Rt;8VPP zarmReqVMvU>-OrBqEnaMu+*oO6veDgHTPs!jYI{xQQSG=gwsR-^-CsE|FDSL38PvL z?$qEfTeAtn8@4z zuCB8CYFZEG$ybk_FA-CWxKJ;~}j1yX+FQpi?hfVM-*`V3t}FM#I#u%8QTM@;;Jj1Q&4#?4b!)+=8fQ^!!+f=hKbZo(``7og z9raoczVDOb{S2R`jQWl(>^PCk;mLX{L_dn@p=V|ClwS?R&gj)SW=qcOz#Gg7ej>+? zyhiaL(HbfW4P{k#20^ydM`lEU;Qis@KmmgxKVJLyO*y1HPB+fzy)$R~n@0UHc!0`u zRR}aVIHLP-n*c)B58bDQEAeGxvPtj8-Ah{V&4cIS#lt`vNJ)`*VA9(tPzmM>aiF&hrd z+v60!^P==%`A$B3qIja9F|k5GRm<)LW{*Bo#;RQ}WdJm+Qj2#c-pmL?^>(TD8@~>Q zUSucve@}nP@sb;cm>u%@~Vw%gH$!5b9<}-@%SgPi&p0;`QG9L+y() zVS#FO_zJ{!KL@izyyz*e*Qi5CwDWqUM_~T#au|6eNT3=*9jnpyf3lyQXvbK*h_*6N z8~4g`%E;W84tzx$w=_5D#oi+6{X2g>Jnn5(vJ8~tL};V&5FLsI@v_kuwUxUtdD@}U zch#apZm z_7jEXw<5~prRTv-3?H~nV#a0C3YYG22MKEjKe_MhCT&?w1YY}k{iEqtk$Nnv;C$Wn zP$G#@Iq>U4G^_3N!ll5cDNNHro4xUw?|e_IJQr3ok=_&OPFF_sY+*1mp;`0FF_dif|3-mg`K$NWzACR%| zO7cVv%pJWd^0f0Jc#=c}ae4R#TaE6LC1ekU8fE8qjrn~J&qR)w5jjm9Q|{(mb;oY3 zA<-blv3*Ax|1mH#ob!otojtJAQ_o5Nj&_Yr(=st8mnCIov?^@ZbU&_8j?E{&Y=WD0 zJne&G|ME8zF3bf#AV;j6#9j5C;bFe-(Ea6}4LBmGqBo>(@$CT!R%^ejNGukH%!5z? 
z49{__`q9gwq*+}B1V%8@FCIoNbr~JoBun9HEWdD@QvH8{7UM&%ht0$Z11*Us5NiW4 zf_~okl!tUMZunzI;8yrj6s9BqUstKMqWAmPWoz_BeEV#;Hfp+4TgqrsfnbZEcYP7Z zNm>m*A1A6yC1I?P}0>OpjZ;nT*d<{!)4E*#g;`-||fLg(W0HpW0W zbWxD>Cfl%cUg6cY9X#%)>D`KqsRxuy5C3bu^RqcJ$?6>F(W?FaLhg(l2XHKbtdy0$ zo5%nMX;o0-p11kK9>J`%WQ8pwJX4_UmB*v5RsQ#PISA#B6E>R#nM)&SrgCAvlzr`v zTjbM}k@eXh*@hF|-&Ms|BisYVOAl3|Tqw2Ihbt9vHaA*d^jvCnt(sWeF?4yKpP$dR zLflj8fR|L`#u8+0XYXDa+VTQkVX5$D#V1B`va%C@P)N)0@!tac{8tSPBa;;!!ikfg z$ecR(xyhCeHb}l0n>p>!W6+u1guZUr`ONvj%U{z+} zKv5%$v;jQ7ku_jqJQP-D>{<74{YPvT5pq4hQ6XM+jhD1IdFiM5yvhCTW0zjRHZR;X zJx`Q+YG^`x}zDvWIKl3N=MOB45}B%DC5MU;uM+Zmi$93OSB>x_gM zl3d4kr$bpFwrI!hUF`n0rXH$piag?iRuNFsQWb@)j_`hBG-XCAQ11(7=P(FyKb$+7 zz{q}AVuv2HQuXmsPRpR{wsLBdNG|G0$z0uPt%Bc|#KKJUgJ{DsB3EpNbj}=|F>oVRwThORL z6mlq}Fx=HVyZ$xj{5Spl2d_>e-sVaL$i&Pm`yROF21-wHSpX83R0|vfi$~kY+nGBwFi}X@=$+adqh=$L?;C=Z?~vahhDo z7JshM{Ut-~NpJ+8Mq|;bM`(sVuFqBB*6doIv$A~yTRVXUjQTQ4PsKar*<=>UrlWc1 zJGpBq8>7G-NLi})^K8ibZBj7AuwF_Yk75yEg!7Zp!QM0c4cCLE6EW^VcVk?jJIeA% zMB7}|WN@Lc&ty8hhZxZB%{kW{VbFzFQ&mklrXPLyC?+fGSm}x9RNej6584F|UO$_{ zF8B2$jgODR_Ah@bTW5o43j{YAz^0$*NI+gQPQYV}ik1 zVFgo#q!a|%LcD+ZX7+C0AYUML>IZR5UnKa}@lWP6wwe7r44b35PFmh&ZetiTq@8w_ zEAy0n)>_GYV z94})uafz^EBCRF)k=T5N^Mbx_*xl@bVV6gT5aEWcZjUz~mAg~tsuZOlB{IJ?5)>lk z?=A=8fVSdJ>lbf`>_sJ7}@$AT7 z(A(bHxG-#o2#Fz8)dUM8&iz~7pe|s+Ph!rKvoNxA zMBtLwc+Y!nz(68cTF=$Fwy>aRFrIrCn0jpxY&yg4yaAtt++-xnvTiQe`SKn&`?CA+ z#@^EsTp{p}>TXQ9KmA$}(Ros3oF}z#Is#j&ZZ1w3U!>?(&;b*S^DVPosbe0g$37SD zc>d-!``rdY2RGybH*xV;F?-;!V;|O)OFQDBtYmA`;57-zB-2&u4#5f*SVgkF8Bfle z8#4?N30p61k@T`1gJYBiWG>ECWu*j1NE2*dm-sUmT9oiVEIdCXIDR$EV=4cQHz%&U z*x>r+xZk`cw5P9(wy z6jU%w3QpcE-9s+PrVdTXHnvWbwOS3PFqyoS$|%%(#=m zM0r6z13$mAdmfLARyn4du}a&{AE8qmBsXmh9Ho54I6e)}3a1P$k54Y0E1&(za4?dn z$4zoW#NT1Nc`z&Dx0G7{Rs>d9UV<>*>XEOxS|~>tmwl2UO*Ac0?zb`I7MGo=lY(e7 zJ-x0~B>*kU>aTcdiZhaM`8Za7ft^&rvBR(5OXilbIGl7u9GmvMkEjmO4$}H84x3J( z?cS0oL%fUX>bcDV6RgkHQ>6J&-8*uAuSms9&Tpx>;9*W!7FfXE(5M(!C9K~*%UaM~ zNd0#E{=skk!<)3U4I({whDj%+JZ#mO{c~ccw$+vcI1f!EE|Q=r-idY<6fy>c}6 
zjq6;I6NpMoKZ)DC=Q;6iTzOnK_3$dUzitzbo5+7QF9t+%9s}bjKgYUZOi-#?l}GQ~ z^+}HGXNA@xV)u?^#Z?OzpvJaLq?sE>*tTDtLC-G}2H&P&BQQ%YRPU$+>l&?stz%9g zkn|Z4e}jJHB@bYEo;q!quIZ{z&nA6L<$N|Z&w!-B=SRQfV26`K8z{2{g(cElBYYmY zC)a3#!q9w@z-@$?o3QX)CdDsBmhdT$1a2e@+hzM#WVJ;5IZebTpik&h%AMXZMM>2` zCPNhJE;bl_ha*O$NW;p;tY`Q$xX5c=?i(djZ-qu4mu{2PU{ja%t5jW9Ya_ZVBY72) zeJ2A#LOT^2s%18%Yr$Y0q$1YrJm+{x1*U{ABKy@Q?v1;b^7l3PMJEN%`Pavqzjru@ zef$VMd@n@9;*o063+pkLK}$AL$-JC|jjd_U=DLJoLJIn7`s{vMDen71%2``R;{3Ok zh_qEEtDuZbZ{Y85uIl0jJlrum;lm(~TuU#cOiOS4o(R&zwGDSYV5+4cnoNT77W^xD zeb!VH6%eX7a*dST8wNybO&|3csrj$Xw_%$N6L_M2Su{;nubV1+&USR~QcF$D5!>|U zz34mfiIG>AJS(yd6ItdfZ8pcwwPhHk#ur-9?_NngWov8Au({@F+S&rb=TyMd?GK+c zN>%L)m~adxsV@%F1(;{fd~hN|5C~7pJ>1ikFDmkRF$PKcL8?cP=#S14zr8Ca;!fU( z$jNOA*x%nb!^JFaK0{r=OD-P>MzJTUK?@T{|>>7u~jHUnyr<+hJ4x`28KK>7ao+5>XH%g^jtm{#F%~`ex$khIvMc zYW7NM4hAmU-&}@i#^0lUng-6a=O(cZ;2=6u1En%3LuzZYZ z$pG~dD2B09ueJM8G8FX%Co=$K?oIUeo&a>Z(&<{=k^q{CIDU-2j<2Qd@+UW!4K%L3ne%f~z3Vt-tib zTrVP;9_?=4pDodxdzRB)XXeFY^ZIVpm9NGrk7qRU7u0M-@Ms3AAaOPVg%4cn2#dc?o*jJS5E?-;OtzB|-LF&m}*+W3kRu$5l4Ct)He3D(C66+M8w!A8+7<5xMt(~Fc^ z@1^>s)hV~t?o*>x*S7=&1bzueP;MU&ZH!@eEUx=;ZEoDRd!yv6=?$wSpEC1|=`oRR zKV07?cHL1-LOQQB!un^0h0|;2&XdKz#0c!9`?je=^mMSV`i+sRJ;fG__x(B2WRv*+lmL|;@;0(>KyuzPF~?>tyYBnoeUbKQ}AjtebxOd7^mo4)dW7%gO?&NNMk- zGfkjG_NpL%am~Z1aw z57;j96?)~g=d>mcrg&yh0d?U3gpB|)Ddi_bW91-zY$R|rq|&c%Q8Ex?F71XTHYCb! 
zGOnVdVQpijYVxGD#NbL#klKTc5XziTSV8tSpSl13_u0-cPwHyjOvFwNd?ar-KY&`i zyY6l_Pq8wus8BfsQ2H3l@sk3Bca}y5Ta9x_`1V0yySP@x;;Svn;p6jIINGmzz93a*nC?r%6jCIZ zVE=ix^y63-!?r)ya6!@2mC@kE;R5w}X2K1~QE2mj?&+iJ~-?c^L zZR^!|yZg#7X5K`=*+}^h4_?wa0O132*j>7DszIJ{S`W{lw;tf{5As@uw!12wZ!h6< zhc($Bl;7X=H#42dnRQeS`P|fcelo(X0a4KU?nmERjng})*HBN~U~a^>SW&xAc?}Qe z?!^+kTiz5Sj_!lr?4gq~75fyu6~~!^_=Bt1ft#R&xp5J*>$jI4BBHanJ?g$CPPHfY zP6Xj>jHjP7Zh{guQJlyz*S#_?{7|sbP3ImN7ra%Nxy(qNJ)@SQEt8n&9Io0yF=-1iQ%kjHg~GD4Bf&!1Qu2oQUyesd&)5}I zjXHLv&7?b_R9&sUYh^J7$~+zz#$vXY8d;#j9;3xo{!NyqyNQSi>(iB*3C}j#1BiMP zF9^m>ohJw)5)ajm2@jLm<`~kQ8sVO|7dWtLv+q!ydN6yol`pUh4=1y9 zGXq&32b&qU&v8KK8*KUtZFVIH5}WrI#DMOLQ3${D61ukSI?9y4R9Q{~M>j_%hBNI0 zfByD%PO95xty#ftmVgLz#lXrMC|;b52&_YeWe@6ynHW5)L(vbg_Z(eaKNK?iMD?m! zzi~1Xy1q=I|M*^7`^992$=dzF%SANBkhhdRCwhPmb`t^%9E5-(zo;<;F}-g}j*dRk zH!u({mpeUX1r6>Jlzmj#!xo#q`f(u<`KPe76yzClQ{9i?{^zsnAe)dw?Nqw}j#s{WaEw%e z_SdIxQabX0Kd5JJkdmk64f2DB4~2xl0-?eED&IWWLJvS?>7ZIJbdX_j@LYvmkdZpg zD{mupW(siLJN};jKJ0Ndx>yfCH&m07A`rwXeL2wriFTk%RdZKmII`&{Toyl{3smGQ ztWSO7u1MwUjJa{Yons5BYTEW*^e8Kn6S_7KtXc2*sHjGL{2aN3weRTxXaVq95cGLo zj7VvY)~rVO3>g$-uvvwmHLKYxIAO(FFV0KoHQ2A>d9vD$%x(9NP#}J$P)vYbgN<`T zfL(|bd*HY>dTIXnG64Ns#4Wl9nHu1i_=pAPxc4_ohBbY=_Of_<#xiWM*@NdLESYF8 zoDfoSOA{&@3z$HHmB#qEAx|Y@^^sW5xodqB6LvaE4&M%Ep^QFx{izGHnpb4T>BNW= z4ATpV0(Oh_!Dj1A7N3czU%o9e5??ENA5!-tJ3zg?v;Tfygfqu!)vnXPSwAdy_D>GJ znQ3cmu-MpQTA&M`_^nyRk6TVsIQ#%q^c3ifKW-Z8}ewRc&&fohq4dULn z#LpRWv4nT0bYCy9OYFU{8m0j4&bZS4?mlZRrAufmDrIjkQW_vBt7D`I#X{Mtu%?U} zPgottsb$-O`C|lzeU{Eh+M0@kchC?0dW`KdJMt`3;Lm~NJ+QD`mwkPG1Gu;Ee+R-S zx%O1o@>bRu$C!2PyTc8}cCVLdNpcwzUNc2Zf6(s|`;s%PhOWNH_H~<=GMNH#C#dED zgD2A8<1GCvGQzs>7roqyCEZ`Eal z-od*BdvOw<7yk)h*Yd@I%*ShewGFMp2{3Mn;_DYK;xwfmQbtIot*z z+B9g~b9M&VpiZS)q8)qC%l;TGa^r(ZO!33nv-2aWYC?9meSl32lftMnJn#vn1oUQZ zKY3_d;}L(~*;zWU?%xcod58e|;&kpU0@14l0F`N526#z)Ep~z@ zpgic_TOHqyy~)=EJCM>O7rrgVZCeTSCnHN#ZnBPvXlC?v0GaB3qFuSE;yEdAe0AR#I@!c;f zY1p<>4hDHR{tHko2zHG4muFE5s?kOCuse}DcJ!F~Q=nEQ?qq`rRJZQ>o?ihYztXf| 
z|2bagn|hGp$m6h|b*g<9XV<^I4g=r_aiT+`E|-xLjq2T$U4LEqVCmKuOFc z4fyHFyoj+IcizsH-}!#sICeDZHSi3pD_EBZ?C50IB~W; zE(NW6tI~bcVw(3+3icM#veqm4W5pqwx_(7H+7t*TIUEdD)Vj6yUJSUNLf^}nRl$qy1BIsm=!@$L$hYs0Y7OGPTo3_NG5!eZ^ ze(&+I;1nCapr{0(&V$Ryd<{&{qDBSZ=Je}x)J2M(-3(Q#mm-~HafCd6`0mKv{;yQNEb)p!-!1h9f#Rx9-)#t-60GJKy zO?G8E;)8}2E8JMRWHvCXH!8!j8q))rw z{D0>d)Px7W^06Yj`N22NfPfUC+k#76v7tT*aL!@=ig$g#7tZuTPFzStyY<+1VOM^9?N`u5N|MS zgs;3)6L6I}z~zbuvo&F6V1cl0p$oyF+es@D0=xu1@cT{^Wj0{NZh#S`X(ZgAp=A%N zN{m*%0#DYPC0tiD1YEzfaZ>pScvZ0D5Y`ibZsR=|HEX=+8r%T<=cMAh=LevNj1C~R z?|Ie3=wLdH$3h#O#B@Zh!L`G`;lf40;(w0ppo8!M53Tz})+tU1+hsZMbZ6VA_G~b) z)K2#ODSuBZ#QQ3^X-k*WA{f0axWmU=A2?zCufW%id3^KAz~4%MKt)Z!2Xe}LM^TuL zXfF7by0>g!8||KMwy7g8#RxZ+_zyqTq0oZ9xf_mPb^5`pVJqwTSRfZ9z}}M9oj`;2 z!d}09mX!{67(6|sXZ|renA}j>qM!dG5w`dk0ky6)4etVc+=b^ylE1%|bZWn%Ue1F= zDL+>df_(Q0^cxdhW-=-p(h(@ zYua^|i4JB4h_bQ#W(%B<0We!=VQc>*AY^d53-*Kx?H_#3hrpD3fC)z3Xi+L2;{ZYh zcg9@bbgcuJ(axQXInSl`qN|6IC~jkWNvH=9W7I)OC3-LtQ!q0Z*OkverB8ruzee>t z4V)45HP}DuL<71Am`|`JW1V+^3X%ra9Zn~0dp;uqjL!U-Ar1-t${q&$%@KT1xzMQt z7UCK(PM(W5bK7aF8MuTS%zfd3czH1s zJ=!%(IJ6po<4g4bmhALi+<(-PmIT_qXL3wWpI8$*mpLGX5SjW4g#fgMK^PgC;YBghDv zN-<7WY8KQAtZw{(#ZI19Rki4N!8-+ZMogk)gYE%{1Q@-gFwOl52NJG*I=G$%X+_g) zBB~E=vUjy7D&Y7Pe$j*5m&<}cDFp;dxWg(3!IkRZCf)fC$rf%jk3_};4a2xPv_z|b zpu4SDr2MoZa31T!irHGvfZh5{=*140t0^!AFY68*6g6Q-)-#boe@4po_BMpMq_yy# zrP-D}G}wIohdNXg4+O7Vm=<>&22HFX_sEY0AQdNK;mB@ozY4o6y)75;`cBPnfrelM z3-%lqivufM3z8S;J&6OMuh?4)8{vu#;0Sx3`rf`i2Qzb2!dWni-&&0jSU(H+G+v$Z z668C~fm?;X0zGENc2s?nKEWG&IO-3wM6N#Oh+v})WyI1mzt{YsW zQY`c%^1uP%@GBQ}&1g4ZzX!;(p&qo#lQmB1V;To(7nhTwP1Zudzo%=5OuV9jq8-e7 zjJqLS^^J%+w}xR-YdEBrA*6nAo|OeGnFx{>F?2UU9)rC#gu^O+A+O@C7nHdoX*2L4 zE);=Bob&jg9yCkM?zidy@^Z;;E5t+c*}nGklDbumtb$$OfVZYMVi%T_I}{KB$Ar8- z0?j8SPUvaFs*F~hvm}i)?OAnh)cKre1fmaoT@F}8+7-Ovd>>d;FLr{Hd|U0=i?wyN zwH<&vf~nNn+B&LWvG0ej=mrFvTZJ1_ce;i=p*g&8^X5%Dq$q@Px&qiUqksT`QrZ}G zG9VBK0sanZnwIV3r@vluH~un(^~W}Dquf+ji+6SHzbb_6Xn0Ov-aa-?fdqgmeNJ)I zIJnuW z(G{J`!NKsKQQ88dw!ota7CiVyrtROXiEVd(78D 
zhDz=A90+?h1Fzwyb>yKKs)z==Nebr2)_t%ke?uvAn zvf)NsSR>Bwv>x5K6z2L?d|7yF_YaS+gJ(HRW`?pOsRemYie|riOZ#oAdeUr-o~Tr{ z?jeWbdF3Ne15OiNdu|?|(Ugj%j$;CYPQtcMPPxOi-Y)jSX;@>Fp{E!j%v+y$&6AqB zP4{f4MLsC)ij@{0{rKZ zQLf!*o#HBuK{V-XC@Pz`o~?kG8|VMv&}`{`Bj@VLf}}|xE>>1nWm?5H!zcly0vfvF zxU>{88?s=C^P|oOEwsPw9DpOzWh9mOU?0LlgM0h6-9kuT*f*X}iYOX%600;*QaX{C ziBCzdglIcW$q?W3^UJJ4&Q-=wla#IvCx;e)?7?2J?H1Weu^aP5VIn3#GsWxqkC6(o z#4R4L$QDUd;G4*($fCV(720qJHmtxf4s7Sa=MD}9rxDjB|q>#8DcSa4d1S3fP;q_ zgp62G##&y9#tK2&!ITw$m~m>czX_)7PI%+bFVBrU4Gnt-{E`)gFNguJftEp5a8NYX zWw8yba>312&<_?{I}M_{gKd%Za-VIO0@bgaJl?LU(cmuoEO;V8ZsR)#GK^T^uXnfl zh=QTr7Yi`jNN8=HZ_1Xy;IYl}`LhqYSm?&R(Q9AS)z2nsxs6q4M(lu6PtM1WifWkl z_KO}e1Y3raK`TA)zf2^09ST1LtVT>|SJM3||F*dAHRP6J|LJ)?Aj~>vp=P1(A;xIl zWfLz%2u+|D+3p=3y~Y1cQY)3&Gm+9AcOY7ICk`5>X zl=T)%0Wnfq9&GW!@-j zOw7f-7VuOv*IOJN9cA9&2jbal?pKrg@W_d0jonDjWZ+r-^F}(AjGsPz;(}_O>>1v^ z`>qJ3?&;X8@(VP>DFa*H-N)sr#tRKzwP}0ypG!cq%cWaDr_jIUZlF%TH7--7U+&~X zf`fRHAN~5q-MRR8oaLn4iSq(}(-dv9A*DzDJ7*b3 zfKgv+{LYt>iLHKj759-nLOW;jC5*he2V)ld=e>c&x~7AR76Oz%r`jo$;J%@uqzy~u zhEE&>{k4LXpecM2 z@6-aP$q>-ge|7?poM0qPqj-4>vj78*$MVO_2NjNQ6;*NW{*_? 
z5vA)IN1@1!d72%56&>4CrV9)!i?+SQzlIUbNQz7WURmnI#DrjXQa;#w1ERnbA2Qzs ztw{e&S*Q*@l>GXJ-c@e(Y})S31D&^64y&`k1N&!)G{^8v2&JBnwxPR0CAg&NvKJ!= za%e7Z_rzVde+@BNtf!~v)-_P0DNQW=t@KOdaF~uWnCct9m=6DIee;|kuCA_*xDs1> zsF09QPGMo;yVs|Dfss59JXR*;`kDU@5M=dq8xi+nJV^r?S5a)OYG92q`|ZpBc>|z8 z&x_6rQh-jpeJNTn`8D6I6CjW|4Sx4S_!bED|0{HXmvl`3WUL-Ppk)Moe40RxG*ss} zc4v(?3CK^U`?~ zP(wNyviVe0&A{7v2Bbb)qI0Ab&g7VeIC2 z)_|;VjTXLVJ<0^Jq`8y@bEN|R9jj;+6qx^xg}z+ojXxgsP9Q&+KIC;U+9K9}zy60; zq1d5!9XGfTO@Bry3PLGV1>L{L$OX3cx_Tt)yrew`^g8f&QA*Em9r^caT+|8M8W|mp zyk%ng=h`^n-#G>SYm#+DK?%+@&^A%>HO-2OvVrp@yKDcg)N~9ONivJ=SD99``z@2` zkzBxc&TV8&`mfx9;h%!Y$;suQ(dc=H>syuzV41)nBU($qY5VW3PM&y3A^V~IDPhdK zOcv7SSm40F5>!`ePMB-|L_Oy@2ny%DfscNC4K$LyQ2v==COxcCq5Fm8zxIx3tQ0cC zEJ$w~lw~+7WaOSwalH*7VVG2mMMa`Y}QZ{`&i4 zDpk$0&UNSq2+()o7A>R@Jc&7d`zs7wbsw}TsikT+I9?sbOa*2LmV?^0f8|2VvMdgx zj2#HL*iG5zjEN6w@kl;n|HQHiz-Csdzs8A1#FFt@H8j?AvqQzh3W;=#GCH7wZ_)lw9S>?F5Q{=);Op)VxZRv9MFV9m!>rHakOuIqX`=8yC+*)fS z%JuThU&uGz*79mL+5`2QQya)`t>5z}vWyb08*LX`?q?*c`Cha9wnYGHL%6;}@L#)F z$yg_U_wTMeJ8TI(8gYxE{DUq^M9p-F2Uo9ZeK2J?}I1Y~68XvHRG-8it5b z_uMYp>2#ks1^RhTUq?LJ9lEPZ>~i$PrNg0R4>UYyi(Ut9d7>Xx7c9f0}P5oD9xUo8L{nf~9|?0*D=tI8obg z>SIr9sf-TycPb7}WMJy>IB(&gMe8uy?XP^&svsHt>*j6w^3oS|UG>z?DF!BiV;no$ zqZBhd!24l9<=k~5MdXjZ%THhw?tU2g3`TSQ z;J?L1Mf@R&V6X6Ijc2^j_qB*QKC%xqDBam!!FtGiUZZY|i<@HZF7?sB7n10-X&QUC z&l-S9i>+w?Hhu~=68Iv(Pmi6%e?xp1&8uzlVph9u|BC05TscFM13gCd|Fm=M|4e^> zJj`t>lBJ6tE+w}VQOa!*pH)h3YhASIf+SNCpT?~DQ10}3(1mOwA(xe6X)a5`m*rA2 z2@~05n^r33ZG6w_Q(t|)|3dq9@AG({bKd7X&+GMiKA*?=^t+laKNq77ExA)+FTI14 z45Q+*_FjcR`u#ehXC4l^U4w;<_XPk5QDoI%*N8KK!zkf{W)8mVfDy;NH<@!isEuO7 z%a_c}c0|Zwjw{H69lfFAFcAlyDAN<;f~%kL z*PWht0*^HBXVf*laq4te_T2*842w#Cg%(>V&9vP3s4ySN)D12+&-KZl1?qknjSqUE zt#SXG@x{S?7TE5LyA(wm9pi)ZJ5WUg9DY5CgXiw(-~xg1U2;zP1%|F#80(gn<{N4X zvYKet>*q4WNjEYBh$(B}GcR}o1+g7Ed)`S4swDuP&5rne$_7@`tV?qn6~7|j!i=S3 zM}1n9M?xICIW$D&?AWJ-ToROGV|SJoO*FQ)ItD~%e6Q_p^1QMtzoZMoYn^4T>;37R z4Ux@xVx&r8su@Es1T6neK7enCecobdy!?RP@`Cqk3km15!} 
zvtLT(u7JH^*%bp$;JuJ@gTw6)qzwh(SJsaEvWfgt1+&meXI9B0bmk1wZX&dkF%nZk z(y`jhsd(CdQN&*Crro>1EzvmI1Ti&2nN*3q-~CY@(WhdK!lfiawZ2~bVwlMxn9+sh zWU4yK=a;EhZ~;@|(&zHdMZl{9t4f^-zUN7LCNA`nGT*STK6jizznPY7ED|a>c^mOZ zU=bbF61@T9x={>2L#nbcnM|_cQLkrt{#NZAE57 zQsZI9jF63P*jEH)XlRzCdv;S$gZ|Ht_xutasmb!%saD{y;toFfOODdPM*AXDji9mv zrWIR5QBdwzh?S6{s9<|Fj7&TE)o!wK+4yaY24fg%RWQY0))W2ZG5!=5)l(`Db^?F> z>(eHm8H*R1yu+j-4MB}?^gI`+A%1+kv_RQ}9uNv2WoiL?Om(^h)FFANX3GwsLuWJ1 zsVI@qsep5axXWBXd7^4PjD=rE(DOIR%{WigPeTBG2%BXeuK0NQDHRu6z&p$f`wTkN41i7Nbc;VU4I%I3$v4-`FI`PbTlM znK>qrQU4zukqNAEc;2C`)+|e=4-i@txwuDXj$W(+ksdaKs61=?(9mDh`rIPLczU;H z9k3VV-ZDm%=fl6gPC+0z3>tms;N3%#2XCkeTAdpWdL-4}_U(Am3#ofH7qwe?X3mm75An`1Xj++>~s_Ufw5P zU-r&%f}34Nrs>Q)SQ1puF%)H!g=)yaUf=&r-u`63Ca2M%cu%0(AY&o!OM_t zk5XLy8Rsh~r7RK`aDc91CIqx_R&(Px95H~Gzkmkh=JdV#0lbD`AamnThMv?qjlF&7 zhP*IHMWx-aw=`K{l|(i19(1j!b&umA`of;Dk3;%EPyIoYTS&XsY77oD31+2&^8o4I zr(3y?Wu<2IJY<{_YHJcgRX+YFk=kf63!2l?#=qF;P9@NE;jUFlI!; z<#N4Tx4M#P%DXM+z6>cQNHwsJ8s2k)qiikfOTzn-E}H$>yS8Ipw6Q34x%n~X@p7J` z3CY`QGqR8tqSr@k+W7G4Y0p0#{F^pew9~h}CE4F)PFg6tlwzz&K#%l1e;?Acio13k zM>vad#=!1`ENMESA&Xp>777@*ABeSpT%x}9{8+ld=Wt;!Is~3D<{~Km6qQ-l;~8^! 
zGPk(&h{WxrWsS$&=$=|V!8nmWwj-9S;l9Cu@TH{fJusu6W_GB5YFT{UG9X$!wup^V zFH|3in4G*G&XF*0+J%>O6bPyU@vRq&`MaO9eYqt3i-vf=hPbx_T2E78Q=HB}E@hT$ zw48On+MtTTR^@L$9@~E)Cpj5->sLDS(Ksf!OeSvA{(SrP=?TEfMW{jZLRcNOr80=2 zmn$S8j<~1qlWLR43hZK$>uf{i$X!ytcEEYLl;!%7V3K>)-NtHTYG-56{o$sWK%i(a z^~Jw2X+;t80q$@npv!utEsNB5 ztxu9(k-%%r*drw{f0iO%>*{pUaw9Ckgn(;$XQmFk_*YF$I#uT2pN0+#v-x4%x1s)S w^LyT;<%8er;P)K-UI*Xd@%LW%wio8KXQ+o7ixqDcE&#t>&aO^njy}=<1>9XoLjV8( literal 0 HcmV?d00001 diff --git a/docs/user_docs/backup-and-restore/backup/backup-repo.md b/docs/user_docs/backup-and-restore/backup/backup-repo.md index 395a496c0a9..73246bc8624 100644 --- a/docs/user_docs/backup-and-restore/backup/backup-repo.md +++ b/docs/user_docs/backup-and-restore/backup/backup-repo.md @@ -130,7 +130,7 @@ If you do not configure the BackupRepo information when installing KubeBlocks, y --provider oss \ --region cn-zhangjiakou \ --bucket test-kb-backup \ - # --endpoint oss-cn-zhangjiakou-internal.aliyuncs.com \ To display the specified oss endpoint + # --endpoint https://oss-cn-zhangjiakou-internal.aliyuncs.com \ To display the specified oss endpoint --access-key-id \ --secret-access-key \ --default diff --git a/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md b/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md index f81f561538c..310c51c6db9 100644 --- a/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md @@ -9,6 +9,8 @@ sidebar_position: 1 The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. 
The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. + ## View parameter information View the current configuration file of a cluster. @@ -62,7 +64,9 @@ You can also view the details of this configuration file and parameters. * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter reconfiguration takes effect. Currerntly, Kafka only supports static strategy, i.e. `Dynamic` is `false`. Restarting is required to make reconfiguration effective since using kbcli to configure parameters triggers broker restarting. * Description: It describes the parameter definition. -## Reconfigure static parameters +## Reconfigure parameters with configure command + +### Reconfigure static parameters Static parameter reconfiguring requires restarting the pod. @@ -146,6 +150,43 @@ Static parameter reconfiguring requires restarting the pod. mykafka-reconfiguring-wvqns mykafka broker kafka-configuration-tpl server.properties Succeed restart 1/1 Sep 14,2023 16:28 UTC+0800 {"server.properties":"{\"log.cleanup.policy\":\"compact\"}"} ``` +## Reconfigure parameters with edit-config command + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. + +For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config mykafka + ``` + +:::note + +If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. + + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. Connect to the database to verify whether the parameters are modified + + ```bash + kbcli cluster connect mykafka + ``` + +:::note + +1. 
For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in later version. + +::: + ## View history and compare differences After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. diff --git a/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md b/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md index bd2d8fc7ef6..2d82626abbc 100644 --- a/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md @@ -9,6 +9,8 @@ sidebar_position: 1 The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. + ## View parameter information View the current configuration file of a cluster. @@ -36,7 +38,7 @@ You can also view the details of this configuration file and parameters. kbcli cluster describe-config mongodb-cluster --show-detail ``` -## Reconfigure parameters +## Reconfigure parameters with --set flag The example below reconfigures velocity to 1. @@ -76,3 +78,40 @@ The example below reconfigures velocity to 1. 
root@mongodb-cluster-mongodb-0:/# cat etc/mongodb/mongodb.conf |grep verbosity verbosity: "1" ``` + +## Reconfigure parameters with edit-config + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. + +For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config mongodb-cluster + ``` + +:::note + +If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. + + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. Connect to the database to verify whether the parameters are modified + + ```bash + kbcli cluster connect mongodb-cluster + ``` + +:::note + +1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in later version. 
+ +::: diff --git a/docs/user_docs/kubeblocks-for-mongodb/migration/feature-and-limit-list-mongodb.md b/docs/user_docs/kubeblocks-for-mongodb/migration/feature-and-limit-list-mongodb.md index 5ed72224880..8bfc5ae2405 100644 --- a/docs/user_docs/kubeblocks-for-mongodb/migration/feature-and-limit-list-mongodb.md +++ b/docs/user_docs/kubeblocks-for-mongodb/migration/feature-and-limit-list-mongodb.md @@ -1,6 +1,6 @@ --- title: Full feature and limit list -description: The full feature and limit list of KubeBlocks migration function for MOngoDB +description: The full feature and limit list of KubeBlocks migration function for MongoDB keywords: [mongodb, migration, migrate data in MongoDB to KubeBlocks, full feature, limit] sidebar_position: 1 sidebar_label: Full feature and limit list diff --git a/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md b/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md index f43767d0259..19d05dac252 100644 --- a/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md @@ -9,6 +9,8 @@ sidebar_position: 1 The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +From v0.6.0, KubeBlocks supports both `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. + ## View parameter information View the current configuration file of a cluster. @@ -30,7 +32,7 @@ You can also view the details of this configuration file and parameters. 
* View the parameter description. ```bash - kbcli cluster explain-config mysql-cluster |head -n 20 + kbcli cluster explain-config mysql-cluster | head -n 20 ``` * View the user guide of a specified parameter. @@ -64,7 +66,9 @@ You can also view the details of this configuration file and parameters. * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). * Description: It describes the parameter definition. -## Reconfigure dynamic parameters +## Reconfigure parameters with --set flag + +### Reconfigure dynamic parameters The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. @@ -184,7 +188,7 @@ The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. 1 row in set (0.00 sec) ``` -## Reconfigure static parameters +### Reconfigure static parameters Static parameter reconfiguring requires restarting the pod. The following example reconfigures `ngram_token_size`. @@ -285,6 +289,46 @@ Static parameter reconfiguring requires restarting the pod. The following exampl 1 row in set (0.09 sec) ``` +## Reconfigure parameters with edit-config + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. + +For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. + +The following steps take configuring MySQL Standalone as an example. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config mysql-cluster --config-spec=mysql-consensusset-config + ``` + +:::note + +* ApeCloud MySQL currently supports multiple configuration templates, so `--config-spec` is required. +* If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. 
+ + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. Connect to the database to verify whether the parameters are modified + + ```bash + kbcli cluster connect mysql-cluster + ``` + +:::note + +1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in later version. + +::: + ## View history and compare differences After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. diff --git a/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md b/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md index f02b4c8cd44..0911e788bc6 100644 --- a/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md @@ -7,7 +7,9 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. + +From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. 
## View parameter information @@ -30,7 +32,7 @@ You can also view the details of this configuration file and parameters. * View the parameter description. ```bash - kbcli cluster explain-config pg-cluster |head -n 20 + kbcli cluster explain-config pg-cluster | head -n 20 ``` * View the user guide of a specified parameter. @@ -63,7 +65,9 @@ You can also view the details of this configuration file and parameters. * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). * Description: It describes the parameter definition. -## Reconfigure dynamic parameters +## Reconfigure parameters with config command + +### Reconfigure dynamic parameters The example below reconfigures `max_connections`. @@ -158,7 +162,7 @@ The example below reconfigures `max_connections`. (1 row) ``` -## Reconfigure static parameters +### Reconfigure static parameters The example below reconfigures `shared_buffers`. @@ -251,6 +255,43 @@ The example below reconfigures `shared_buffers`. (1 row) ``` +## Reconfigure parameters with edit-config command + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. + +For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config pg-cluster + ``` + +:::note + +If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. + + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. Connect to the database to verify whether the parameters are modified + + ```bash + kbcli cluster connect pg-cluster + ``` + +:::note + +1. 
For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in a later version. + +::: + ## View history and compare differences After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. diff --git a/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md b/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md index 3636237886b..ab7f997a21d 100644 --- a/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md @@ -7,6 +7,8 @@ sidebar_position: 4 # Configure cluster parameters +From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. + There are 3 types of parameters: 1. Environment parameters, such as GC-related parameters, `PULSAR_MEM`, and `PULSAR_GC`, changes will apply to all components; @@ -196,6 +198,43 @@ Static parameter reconfiguring requires restarting the pod. The following exampl OBJECT-KEY STATUS DURATION MESSAGE ``` +## Reconfigure parameters with edit-config + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameters in a visualized way. + +For Linux and macOS, you can edit configuration files with vi. For Windows, you can edit files on notepad. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config pulsar + ``` + +:::note + +If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. + + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. 
Connect to the database to verify whether the parameters are modified + + ```bash + kbcli cluster connect pulsar + ``` + +:::note + +1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in later version. + +::: + ## Reconfigure with kubectl Using kubectl to reconfigure pulsar cluster requires modifying the configuration file. diff --git a/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md b/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md index 4b2a0ff4436..ad5b3966ad4 100644 --- a/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md @@ -9,6 +9,8 @@ sidebar_position: 1 The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. + ## View parameter information View the current configuration file of a cluster. @@ -63,7 +65,9 @@ You can also view the details of this configuration file and parameters. * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). * Description: It describes the parameter definition. 
-## Reconfigure dynamic parameters +## Reconfigure parameters with --set flag + +### Reconfigure dynamic parameters The example below reconfigures `acllog-max-len`. @@ -155,7 +159,7 @@ The example below reconfigures `acllog-max-len`. 2) "256" ``` -## Reconfigure static parameters +### Reconfigure static parameters The example below reconfigures `maxclients` and `databases`. @@ -252,6 +256,43 @@ The example below reconfigures `maxclients` and `databases`. 4) "20000" ``` +## Reconfigure parameters with edit-config + +For your convenience, kbcli offers a tool `edit-config` to help you to configure parameters in a visualized way. + +For Linux and macOS, you can edit configuration files with vi. For Windows, you can edit files on notepad. + +1. Edit the configuration file. + + ```bash + kbcli cluster edit-config redis-cluster + ``` + +:::note + +If there are multiple components in a cluster, use `--component` to specify a component. + +::: + +2. View the status of the parameter reconfiguration. + + ```bash + kbcli cluster describe-ops xxx -n default + ``` + +3. Connect to the database to verify whether the parameters are modified. + + ```bash + kbcli cluster connect redis-cluster + ``` + +:::note + +1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. +2. Deleting a parameter will be supported in a later version. + +::: + ## View history and compare differences After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. 
diff --git a/docs/user_docs/kubeblocks-for-vector-database/manage-vector-databases.md b/docs/user_docs/kubeblocks-for-vector-database/manage-vector-databases.md index 0e77cad4c6c..ba577077b31 100644 --- a/docs/user_docs/kubeblocks-for-vector-database/manage-vector-databases.md +++ b/docs/user_docs/kubeblocks-for-vector-database/manage-vector-databases.md @@ -39,7 +39,7 @@ Before you start, [Install KubeBlocks](./../installation/install-with-helm/) and 3. Check the cluster information. ```bash - kblci cluster describe qdrant + kbcli cluster describe qdrant > Name: qdrant Created Time: Aug 15,2023 23:03 UTC+0800 NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY @@ -73,11 +73,17 @@ Before you start, [Install KubeBlocks](./../installation/install-with-helm/) and ## Connect to a vector database cluster -Use the following command to connect to a vector database cluster. +Qdrant provides both HTTP and gRPC protocols for client access on ports 6333 and 6334 respectively. Depending on where the client is, different connection options are offered to connect to the Qdrant cluster. -```bash -kbcli cluster connect --namespace -``` +:::note + +If your cluster is on AWS, install the AWS Load Balancer Controller first. + +::: + +- If your client is inside a K8s cluster, run `kbcli cluster describe qdrant` to get the ClusterIP address of the cluster or the corresponding K8s cluster domain name. +- If your client is outside the K8s cluster but in the same VPC as the server, run `kbcli cluster expose qdrant --enable=true --type=vpc` to get a VPC load balancer address for the database cluster. +- If your client is outside the VPC, run `kbcli cluster expose qdrant --enable=true --type=internet` to open a public network reachable address for the database cluster. 
## Monitor the vector database From 1b14e9ce7b3ba7f2cbc79e2ee35abc4731a277a7 Mon Sep 17 00:00:00 2001 From: a le <101848970+1aal@users.noreply.github.com> Date: Thu, 21 Sep 2023 19:33:10 +0800 Subject: [PATCH 13/58] feat: support huawei cloud and set provide (#4996) --- apis/extensions/v1alpha1/addon_types.go | 5 ++- apis/extensions/v1alpha1/type.go | 3 +- .../extensions.kubeblocks.io_addons.yaml | 18 ++++++---- .../crds/extensions.kubeblocks.io_addons.yaml | 18 ++++++---- deploy/helm/templates/_helpers.tpl | 17 ++++++++-- .../addons/snapshot-controller-addon.yaml | 7 +++- deploy/helm/templates/deployment.yaml | 2 ++ deploy/helm/templates/storageclass.yaml | 33 +++++++++++++++---- deploy/helm/values.yaml | 25 +++++++++----- internal/constant/const.go | 1 + 10 files changed, 97 insertions(+), 32 deletions(-) diff --git a/apis/extensions/v1alpha1/addon_types.go b/apis/extensions/v1alpha1/addon_types.go index eb1c18b5d2f..bbee2108b9a 100644 --- a/apis/extensions/v1alpha1/addon_types.go +++ b/apis/extensions/v1alpha1/addon_types.go @@ -92,9 +92,10 @@ type InstallableSpec struct { } type SelectorRequirement struct { - // The selector key. Valid values are KubeVersion, KubeGitVersion. + // The selector key. Valid values are KubeVersion, KubeGitVersion and KubeProvider. // "KubeVersion" the semver expression of Kubernetes versions, i.e., v1.24. // "KubeGitVersion" may contain distro. info., i.e., v1.24.4+eks. + // "KubeProvider" the Kubernetes provider, i.e., aws,gcp,azure,huaweiCloud,tencentCloud etc. 
// +kubebuilder:validation:Required Key AddonSelectorKey `json:"key"` @@ -494,6 +495,8 @@ func (r SelectorRequirement) MatchesFromConfig() bool { l = ver.GitVersion case KubeVersion: l = fmt.Sprintf("%s.%s", ver.Major, ver.Minor) + case KubeProvider: + l = viper.GetString(constant.CfgKeyProvider) } return r.matchesLine(l) } diff --git a/apis/extensions/v1alpha1/type.go b/apis/extensions/v1alpha1/type.go index a213763a559..2c039ac17ad 100644 --- a/apis/extensions/v1alpha1/type.go +++ b/apis/extensions/v1alpha1/type.go @@ -51,12 +51,13 @@ const ( // AddonSelectorKey are selector requirement key types. // +enum -// +kubebuilder:validation:Enum={KubeGitVersion,KubeVersion} +// +kubebuilder:validation:Enum={KubeGitVersion,KubeVersion,KubeProvider} type AddonSelectorKey string const ( KubeGitVersion AddonSelectorKey = "KubeGitVersion" KubeVersion AddonSelectorKey = "KubeVersion" + KubeProvider AddonSelectorKey = "KubeProvider" ) const ( diff --git a/config/crd/bases/extensions.kubeblocks.io_addons.yaml b/config/crd/bases/extensions.kubeblocks.io_addons.yaml index f170da7b08c..6c45a4f8f22 100644 --- a/config/crd/bases/extensions.kubeblocks.io_addons.yaml +++ b/config/crd/bases/extensions.kubeblocks.io_addons.yaml @@ -172,12 +172,15 @@ spec: properties: key: description: The selector key. Valid values are KubeVersion, - KubeGitVersion. "KubeVersion" the semver expression - of Kubernetes versions, i.e., v1.24. "KubeGitVersion" - may contain distro. info., i.e., v1.24.4+eks. + KubeGitVersion and KubeProvider. "KubeVersion" the semver + expression of Kubernetes versions, i.e., v1.24. "KubeGitVersion" + may contain distro. info., i.e., v1.24.4+eks. "KubeProvider" + the Kubernetes provider, i.e., aws,gcp,azure,huaweiCloud,tencentCloud + etc. enum: - KubeGitVersion - KubeVersion + - KubeProvider type: string operator: description: "Represents a key's relationship to a set @@ -562,12 +565,15 @@ spec: properties: key: description: The selector key. 
Valid values are KubeVersion, - KubeGitVersion. "KubeVersion" the semver expression of - Kubernetes versions, i.e., v1.24. "KubeGitVersion" may - contain distro. info., i.e., v1.24.4+eks. + KubeGitVersion and KubeProvider. "KubeVersion" the semver + expression of Kubernetes versions, i.e., v1.24. "KubeGitVersion" + may contain distro. info., i.e., v1.24.4+eks. "KubeProvider" + the Kubernetes provider, i.e., aws,gcp,azure,huaweiCloud,tencentCloud + etc. enum: - KubeGitVersion - KubeVersion + - KubeProvider type: string operator: description: "Represents a key's relationship to a set of diff --git a/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml b/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml index f170da7b08c..6c45a4f8f22 100644 --- a/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml +++ b/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml @@ -172,12 +172,15 @@ spec: properties: key: description: The selector key. Valid values are KubeVersion, - KubeGitVersion. "KubeVersion" the semver expression - of Kubernetes versions, i.e., v1.24. "KubeGitVersion" - may contain distro. info., i.e., v1.24.4+eks. + KubeGitVersion and KubeProvider. "KubeVersion" the semver + expression of Kubernetes versions, i.e., v1.24. "KubeGitVersion" + may contain distro. info., i.e., v1.24.4+eks. "KubeProvider" + the Kubernetes provider, i.e., aws,gcp,azure,huaweiCloud,tencentCloud + etc. enum: - KubeGitVersion - KubeVersion + - KubeProvider type: string operator: description: "Represents a key's relationship to a set @@ -562,12 +565,15 @@ spec: properties: key: description: The selector key. Valid values are KubeVersion, - KubeGitVersion. "KubeVersion" the semver expression of - Kubernetes versions, i.e., v1.24. "KubeGitVersion" may - contain distro. info., i.e., v1.24.4+eks. + KubeGitVersion and KubeProvider. "KubeVersion" the semver + expression of Kubernetes versions, i.e., v1.24. "KubeGitVersion" + may contain distro. info., i.e., v1.24.4+eks. 
"KubeProvider" + the Kubernetes provider, i.e., aws,gcp,azure,huaweiCloud,tencentCloud + etc. enum: - KubeGitVersion - KubeVersion + - KubeProvider type: string operator: description: "Represents a key's relationship to a set of diff --git a/deploy/helm/templates/_helpers.tpl b/deploy/helm/templates/_helpers.tpl index f2b4e59c750..6bf35fb47bc 100644 --- a/deploy/helm/templates/_helpers.tpl +++ b/deploy/helm/templates/_helpers.tpl @@ -283,6 +283,14 @@ TODO: For azure, we should get provider from node.Spec.ProviderID */}} {{- define "kubeblocks.cloudProvider" }} {{- $kubeVersion := .Capabilities.KubeVersion.GitVersion }} +{{- $validProviders := .Values.validProviders}} +{{- $provider := .Values.provider }} +{{- $valid := false }} +{{- range $validProviders }} + {{- if eq . $provider }} + {{- $valid = true }} + {{- end }} +{{- end }} {{- if contains "-eks" $kubeVersion }} {{- "aws" -}} {{- else if contains "-gke" $kubeVersion }} @@ -293,11 +301,16 @@ TODO: For azure, we should get provider from node.Spec.ProviderID {{- "tencentCloud" -}} {{- else if contains "-aks" $kubeVersion }} {{- "azure" -}} -{{- else }} -{{- "" -}} +{{- else if $valid }} +{{- $provider }} +{{- else}} +{{- $invalidProvider := join ", " .Values.validProviders }} +{{- $errorMessage := printf "Warning: Your provider is invalid. Please use one of the following: %s" $invalidProvider | trimSuffix ", " }} +{{- fail $errorMessage}} {{- end }} {{- end }} + {{/* Define default storage class name, if cloud provider is known, specify a default storage class name. 
*/}} diff --git a/deploy/helm/templates/addons/snapshot-controller-addon.yaml b/deploy/helm/templates/addons/snapshot-controller-addon.yaml index 615ca462a5a..e80887f5ac1 100644 --- a/deploy/helm/templates/addons/snapshot-controller-addon.yaml +++ b/deploy/helm/templates/addons/snapshot-controller-addon.yaml @@ -88,4 +88,9 @@ spec: operator: DoesNotContain values: - tke - - aliyun \ No newline at end of file + - aliyun + - key: KubeProvider + operator: DoesNotContain + values: + - huaweiCloud + - azure \ No newline at end of file diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/templates/deployment.yaml index d41a3c09da6..494b9acea5d 100644 --- a/deploy/helm/templates/deployment.yaml +++ b/deploy/helm/templates/deployment.yaml @@ -113,6 +113,8 @@ spec: - name: RECOVER_VOLUME_EXPANSION_FAILURE value: "true" {{- end }} + - name: KUBE_PROVIDER + value: {{ .Values.provider | quote }} {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 12 }} diff --git a/deploy/helm/templates/storageclass.yaml b/deploy/helm/templates/storageclass.yaml index 9e1d09992b7..4554a8ed784 100644 --- a/deploy/helm/templates/storageclass.yaml +++ b/deploy/helm/templates/storageclass.yaml @@ -12,8 +12,8 @@ metadata: allowVolumeExpansion: true parameters: ## parameters references: https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md - type: {{ .Values.storageClass.provider.eks.volumeType }} # io2, io1, gp3, gp2 are all SSD variant - "csi.storage.k8s.io/fstype": {{ .Values.storageClass.provider.eks.fsType | default "xfs" }} + type: {{ .Values.storageClass.provider.aws.volumeType }} # io2, io1, gp3, gp2 are all SSD variant + "csi.storage.k8s.io/fstype": {{ .Values.storageClass.provider.aws.fsType | default "xfs" }} provisioner: ebs.csi.aws.com reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer @@ -29,8 +29,8 @@ metadata: allowVolumeExpansion: true parameters: ## refer: 
https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/issues/617 - type: {{ .Values.storageClass.provider.gke.volumeType }} - csi.storage.k8s.io/fstype: {{ .Values.storageClass.provider.gke.fsType | default "xfs" }} + type: {{ .Values.storageClass.provider.gcp.volumeType }} + csi.storage.k8s.io/fstype: {{ .Values.storageClass.provider.gcp.fsType | default "xfs" }} provisioner: pd.csi.storage.gke.io reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer @@ -62,7 +62,7 @@ metadata: {{- include "kubeblocks.labels" . | nindent 4 }} parameters: ## parameters references: https://cloud.tencent.com/document/product/457/44239, the fsType is not supported by tke. - type: {{ .Values.storageClass.provider.tke.volumeType }} + type: {{ .Values.storageClass.provider.tencentCloud.volumeType }} reclaimPolicy: Delete provisioner: com.tencent.cloud.csi.cbs volumeBindingMode: WaitForFirstConsumer @@ -79,12 +79,31 @@ metadata: {{- include "kubeblocks.labels" . | nindent 4 }} parameters: # parameters references: https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md - fsType: {{ .Values.storageClass.provider.aks.fsType | default "xfs" }} - kind: {{ .Values.storageClass.provider.aks.volumeType }} + fsType: {{ .Values.storageClass.provider.azure.fsType | default "xfs" }} + kind: {{ .Values.storageClass.provider.azure.volumeType }} skuName: Standard_LRS provisioner: kubernetes.io/azure-disk reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer --- +{{- else if eq $cloudProvider "huaweiCloud" }} # huawei cloud +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ $scName }} + labels: + {{- include "kubeblocks.labels" . 
| nindent 4 }} +parameters: + # parameters references: https://support.huaweicloud.com/usermanual-cce/cce_10_0380.html#section3 + csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io + csi.storage.k8s.io/fstype: {{ .Values.storageClass.provider.huaweiCloud.fsType | default "xfs" }} + everest.io/disk-volume-type: {{ .Values.storageClass.provider.huaweiCloud.volumeType }} + everest.io/passthrough: "true" +provisioner: everest-csi-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +--- {{- end }} {{- end }} \ No newline at end of file diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index 5ca5f110eb7..5aa7c353aae 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -1564,7 +1564,7 @@ grafana: # ingressClassName: nginx # Values can be templated annotations: {} - # kubernetes.io/ingress.class: nginx + # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" labels: {} path: / @@ -1754,36 +1754,46 @@ agamotto: registry: registry.cn-hangzhou.aliyuncs.com +provider: "" # could be "aws","gcp","aliyun","tencentCloud", "huaweiCloud", "azure" +validProviders: + - "aws" + - "gcp" + - "aliyun" + - "tencentCloud" + - "huaweiCloud" + - "azure" + - "" ## @section KubeBlocks default storageClass Parameters for cloud provider. storageClass: ## @param storageClass.name -- Specifies the name of the default storage class. ## If name is not specified and KubeBlocks deployed in a cloud, a default name will be generated. ## name: "" - ## @param storageClass.create -- Specifies whether the storage class should be created. If storageClass.name is not ## specified or generated, this value will be ignored. 
## create: true - mountOptions: - noatime - nobarrier provider: - eks: + aws: volumeType: gp3 fsType: xfs - gke: + gcp: volumeType: pd-balanced fsType: xfs aliyun: volumeType: cloud_essd fsType: xfs - aks: + azure: volumeType: managed fsType: xfs - tke: + tencentCloud: volumeType: CLOUD_SSD + huaweiCloud: # Huawei Cloud + volumeType: SSD + fsType: ext4 external-dns: enabled: false @@ -1793,4 +1803,3 @@ external-dns: operator: Equal value: "true" effect: NoSchedule - diff --git a/internal/constant/const.go b/internal/constant/const.go index f7b37d54bd9..a7a53b07d62 100644 --- a/internal/constant/const.go +++ b/internal/constant/const.go @@ -34,6 +34,7 @@ const ( CfgKeyBackupPVConfigmapName = "BACKUP_PV_CONFIGMAP_NAME" // the configmap containing the persistentVolume template. CfgKeyBackupPVConfigmapNamespace = "BACKUP_PV_CONFIGMAP_NAMESPACE" // the configmap namespace containing the persistentVolume template. CfgRecoverVolumeExpansionFailure = "RECOVER_VOLUME_EXPANSION_FAILURE" // refer to feature gates RecoverVolumeExpansionFailure of k8s. 
+ CfgKeyProvider = "KUBE_PROVIDER" // addon config keys CfgKeyAddonJobTTL = "ADDON_JOB_TTL" From fb1023327ed59cd61b7468e235e0b46dfe2a8a6c Mon Sep 17 00:00:00 2001 From: Shanshan Date: Thu, 21 Sep 2023 20:04:29 +0800 Subject: [PATCH 14/58] fix: kbcli report with sc and pvc (#5225) --- .../apps/configuration/configuration_controller.go | 4 ++-- internal/cli/cmd/report/report.go | 8 ++++++++ internal/controller/plan/prepare.go | 3 ++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 28d4a1d56ec..785d4c3f5fe 100644 --- a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -137,7 +137,7 @@ func (r *ConfigurationReconciler) runTasks( nil, fetcher.ClusterVerComObj) if err != nil { - return + return err } revision := strconv.FormatInt(configuration.GetGeneration(), 10) @@ -163,7 +163,7 @@ func (r *ConfigurationReconciler) runTasks( errs = append(errs, err) } if len(errs) == 0 { - return + return nil } return utilerrors.NewAggregate(errs) } diff --git a/internal/cli/cmd/report/report.go b/internal/cli/cmd/report/report.go index 803dab4118a..183765e4949 100644 --- a/internal/cli/cmd/report/report.go +++ b/internal/cli/cmd/report/report.go @@ -348,6 +348,8 @@ func (o *reportKubeblocksOptions) handleManifests(ctx context.Context) error { resourceLists = append(resourceLists, cliutil.ListResourceByGVR(ctx, o.genericClientSet.dynamic, o.namespace, scopedgvrs, []metav1.ListOptions{o.kubeBlocksSelector}, &allErrors)...) // get global resources resourceLists = append(resourceLists, cliutil.ListResourceByGVR(ctx, o.genericClientSet.dynamic, metav1.NamespaceAll, globalGvrs, []metav1.ListOptions{o.kubeBlocksSelector}, &allErrors)...) 
+ // get all storage class + resourceLists = append(resourceLists, cliutil.ListResourceByGVR(ctx, o.genericClientSet.dynamic, metav1.NamespaceAll, []schema.GroupVersionResource{types.StorageClassGVR()}, []metav1.ListOptions{{}}, &allErrors)...) if err := o.reportWritter.WriteObjects(manifestsFolder, resourceLists, o.outputFormat); err != nil { return err } @@ -492,8 +494,13 @@ func (o *reportClusterOptions) handleManifests(ctx context.Context) error { types.BackupPolicyGVR(), types.BackupToolGVR(), types.RestoreJobGVR(), + types.PVCGVR(), + } + globalGvrs = []schema.GroupVersionResource{ + types.PVGVR(), } ) + var err error if o.cluster, err = o.genericClientSet.kbClientSet.AppsV1alpha1().Clusters(o.namespace).Get(ctx, o.clusterName, metav1.GetOptions{}); err != nil { return err @@ -508,6 +515,7 @@ func (o *reportClusterOptions) handleManifests(ctx context.Context) error { resourceLists := make([]*unstructured.UnstructuredList, 0) // write manifest resourceLists = append(resourceLists, cliutil.ListResourceByGVR(ctx, o.genericClientSet.dynamic, o.namespace, scopedgvrs, []metav1.ListOptions{o.clusterSelector}, &allErrors)...) + resourceLists = append(resourceLists, cliutil.ListResourceByGVR(ctx, o.genericClientSet.dynamic, metav1.NamespaceAll, globalGvrs, []metav1.ListOptions{o.clusterSelector}, &allErrors)...) if err := o.reportWritter.WriteObjects("manifests", resourceLists, o.outputFormat); err != nil { return err } diff --git a/internal/controller/plan/prepare.go b/internal/controller/plan/prepare.go index 5dc97857b0b..3dd42bfa057 100644 --- a/internal/controller/plan/prepare.go +++ b/internal/controller/plan/prepare.go @@ -20,10 +20,11 @@ along with this program. If not, see . 
package plan import ( - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/configuration" From 18bf1973826763596ff5bd697cb3c0bbbd3a7c3f Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:27:48 +0800 Subject: [PATCH 15/58] fix: failed to get pods (#5141) (#5201) --- .../configuration/parallel_upgrade_policy.go | 10 +++-- .../parallel_upgrade_policy_test.go | 5 +++ controllers/apps/configuration/policy_util.go | 39 ++++++++++++++++--- .../configuration/rolling_upgrade_policy.go | 8 +++- .../apps/configuration/simple_policy.go | 10 ++++- .../apps/configuration/sync_upgrade_policy.go | 8 +++- controllers/apps/configuration/types.go | 25 +++++++++++- internal/common/stateful_set_utils.go | 9 ++++- 8 files changed, 96 insertions(+), 18 deletions(-) diff --git a/controllers/apps/configuration/parallel_upgrade_policy.go b/controllers/apps/configuration/parallel_upgrade_policy.go index 951b75df8e7..1a8d5045588 100644 --- a/controllers/apps/configuration/parallel_upgrade_policy.go +++ b/controllers/apps/configuration/parallel_upgrade_policy.go @@ -20,10 +20,10 @@ along with this program. If not, see . 
package configuration import ( - cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" corev1 "k8s.io/api/core/v1" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" podutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -38,10 +38,14 @@ func (p *parallelUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatu var funcs RollingUpgradeFuncs switch params.WorkloadType() { - case appsv1alpha1.Consensus, appsv1alpha1.Stateful, appsv1alpha1.Replication: - funcs = GetRSMRollingUpgradeFuncs() default: return makeReturnedStatus(ESNotSupport), cfgcore.MakeError("not supported component workload type[%s]", params.WorkloadType()) + case appsv1alpha1.Consensus: + funcs = GetConsensusRollingUpgradeFuncs() + case appsv1alpha1.Stateful: + funcs = GetStatefulSetRollingUpgradeFuncs() + case appsv1alpha1.Replication: + funcs = GetReplicationRollingUpgradeFuncs() } pods, err := funcs.GetPodsFunc(params) diff --git a/controllers/apps/configuration/parallel_upgrade_policy_test.go b/controllers/apps/configuration/parallel_upgrade_policy_test.go index 27e6c12a905..e1d89971bb0 100644 --- a/controllers/apps/configuration/parallel_upgrade_policy_test.go +++ b/controllers/apps/configuration/parallel_upgrade_policy_test.go @@ -66,6 +66,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { return reconfigureClient, nil }), withMockStatefulSet(3, nil), + withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", }), @@ -94,6 +95,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { return reconfigureClient, nil }), withMockStatefulSet(3, nil), + withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", }), @@ -133,6 +135,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { return reconfigureClient, nil }), withMockStatefulSet(3, nil), + withClusterComponent(3), withConfigSpec("for_test", map[string]string{ 
"a": "b", }), @@ -174,6 +177,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { return reconfigureClient, nil }), withMockStatefulSet(3, nil), + withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", }), @@ -203,6 +207,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { withConfigSpec("for_test", map[string]string{ "key": "value", }), + withClusterComponent(2), withCDComponent(appsv1alpha1.Stateless, []appsv1alpha1.ComponentConfigSpec{{ ComponentTemplateSpec: appsv1alpha1.ComponentTemplateSpec{ Name: "for_test", diff --git a/controllers/apps/configuration/policy_util.go b/controllers/apps/configuration/policy_util.go index 10f3cacc91d..98fab144e7a 100644 --- a/controllers/apps/configuration/policy_util.go +++ b/controllers/apps/configuration/policy_util.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "net" + "sort" "strconv" appv1 "k8s.io/api/apps/v1" @@ -58,7 +59,13 @@ func getReplicationSetPods(params reconfigureParams) ([]corev1.Pod, error) { func GetComponentPods(params reconfigureParams) ([]corev1.Pod, error) { componentPods := make([]corev1.Pod, 0) for i := range params.ComponentUnits { - pods, err := common.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, ¶ms.ComponentUnits[i]) + pods, err := common.GetPodListByStatefulSetWithSelector(params.Ctx.Ctx, + params.Client, + ¶ms.ComponentUnits[i], + client.MatchingLabels{ + constant.KBAppComponentLabelKey: params.ClusterComponent.Name, + constant.AppInstanceLabelKey: params.Cluster.Name, + }) if err != nil { return nil, err } @@ -83,17 +90,39 @@ func CheckReconfigureUpdateProgress(pods []corev1.Pod, configKey, version string return readyPods } -func getRSMPods(params reconfigureParams) ([]corev1.Pod, error) { +func getStatefulSetPods(params reconfigureParams) ([]corev1.Pod, error) { + if len(params.ComponentUnits) != 1 { + return nil, core.MakeError("statefulSet component require only one statefulset, actual %d components", len(params.ComponentUnits)) + } + + pods, 
err := GetComponentPods(params) + if err != nil { + return nil, err + } + + sort.SliceStable(pods, func(i, j int) bool { + _, ordinal1 := intctrlutil.GetParentNameAndOrdinal(&pods[i]) + _, ordinal2 := intctrlutil.GetParentNameAndOrdinal(&pods[j]) + return ordinal1 < ordinal2 + }) + return pods, nil +} + +func getConsensusPods(params reconfigureParams) ([]corev1.Pod, error) { if len(params.ComponentUnits) > 1 { - return nil, core.MakeError("rsm component require only one statefulset, actual %d components", len(params.ComponentUnits)) + return nil, core.MakeError("consensus component require only one statefulset, actual %d components", len(params.ComponentUnits)) } if len(params.ComponentUnits) == 0 { return nil, nil } - stsObj := ¶ms.ComponentUnits[0] - pods, err := common.GetPodListByStatefulSet(params.Ctx.Ctx, params.Client, stsObj) + pods, err := GetComponentPods(params) + // stsObj := ¶ms.ComponentUnits[0] + // pods, err := components.GetPodListByStatefulSetWithSelector(params.Ctx.Ctx, params.Client, stsObj, client.MatchingLabels{ + // constant.KBAppComponentLabelKey: params.ClusterComponent.Name, + // constant.AppInstanceLabelKey: params.Cluster.Name, + // }) if err != nil { return nil, err } diff --git a/controllers/apps/configuration/rolling_upgrade_policy.go b/controllers/apps/configuration/rolling_upgrade_policy.go index 37ec806a884..356519f5365 100644 --- a/controllers/apps/configuration/rolling_upgrade_policy.go +++ b/controllers/apps/configuration/rolling_upgrade_policy.go @@ -56,8 +56,12 @@ func (r *rollingUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatus var funcs RollingUpgradeFuncs switch params.WorkloadType() { - case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: - funcs = GetRSMRollingUpgradeFuncs() + case appsv1alpha1.Consensus: + funcs = GetConsensusRollingUpgradeFuncs() + case appsv1alpha1.Replication: + funcs = GetReplicationRollingUpgradeFuncs() + case appsv1alpha1.Stateful: + funcs = 
GetStatefulSetRollingUpgradeFuncs() default: return makeReturnedStatus(ESNotSupport), cfgcore.MakeError("not supported component workload type[%s]", params.WorkloadType()) } diff --git a/controllers/apps/configuration/simple_policy.go b/controllers/apps/configuration/simple_policy.go index 17800055735..3933db346eb 100644 --- a/controllers/apps/configuration/simple_policy.go +++ b/controllers/apps/configuration/simple_policy.go @@ -43,8 +43,14 @@ func (s *simplePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) switch params.WorkloadType() { default: return makeReturnedStatus(ESNotSupport), core.MakeError("not supported component workload type:[%s]", params.WorkloadType()) - case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: - funcs = GetRSMRollingUpgradeFuncs() + case appsv1alpha1.Consensus: + funcs = GetConsensusRollingUpgradeFuncs() + compLists = fromStatefulSetObjects(params.ComponentUnits) + case appsv1alpha1.Stateful: + funcs = GetStatefulSetRollingUpgradeFuncs() + compLists = fromStatefulSetObjects(params.ComponentUnits) + case appsv1alpha1.Replication: + funcs = GetReplicationRollingUpgradeFuncs() compLists = fromStatefulSetObjects(params.ComponentUnits) case appsv1alpha1.Stateless: funcs = GetDeploymentRollingUpgradeFuncs() diff --git a/controllers/apps/configuration/sync_upgrade_policy.go b/controllers/apps/configuration/sync_upgrade_policy.go index ba8270a5d6a..1404024c32b 100644 --- a/controllers/apps/configuration/sync_upgrade_policy.go +++ b/controllers/apps/configuration/sync_upgrade_policy.go @@ -59,8 +59,12 @@ func (o *syncPolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { return makeReturnedStatus(ESNotSupport), core.MakeError("not support component workload type[%s]", params.WorkloadType()) case appsv1alpha1.Stateless: funcs = GetDeploymentRollingUpgradeFuncs() - case appsv1alpha1.Consensus, appsv1alpha1.Replication, appsv1alpha1.Stateful: - funcs = GetRSMRollingUpgradeFuncs() + case 
appsv1alpha1.Consensus: + funcs = GetConsensusRollingUpgradeFuncs() + case appsv1alpha1.Stateful: + funcs = GetStatefulSetRollingUpgradeFuncs() + case appsv1alpha1.Replication: + funcs = GetReplicationRollingUpgradeFuncs() } pods, err := funcs.GetPodsFunc(params) diff --git a/controllers/apps/configuration/types.go b/controllers/apps/configuration/types.go index 67d17c7b3ad..32276a6ff3b 100644 --- a/controllers/apps/configuration/types.go +++ b/controllers/apps/configuration/types.go @@ -37,6 +37,9 @@ type RestartComponent func(client client.Client, ctx intctrlutil.RequestCtx, key type RestartContainerFunc func(pod *corev1.Pod, ctx context.Context, containerName []string, createConnFn createReconfigureClient) error type OnlineUpdatePodFunc func(pod *corev1.Pod, ctx context.Context, createClient createReconfigureClient, configSpec string, updatedParams map[string]string) error +// Node: Distinguish between implementation and interface. +// RollingUpgradeFuncs defines the interface, rsm is an implementation of Stateful, Replication and Consensus, not the only solution. 
+ type RollingUpgradeFuncs struct { GetPodsFunc GetPodsFunc RestartContainerFunc RestartContainerFunc @@ -44,9 +47,27 @@ type RollingUpgradeFuncs struct { RestartComponent RestartComponent } -func GetRSMRollingUpgradeFuncs() RollingUpgradeFuncs { +func GetConsensusRollingUpgradeFuncs() RollingUpgradeFuncs { + return RollingUpgradeFuncs{ + GetPodsFunc: getConsensusPods, + RestartContainerFunc: commonStopContainerWithPod, + OnlineUpdatePodFunc: commonOnlineUpdateWithPod, + RestartComponent: restartStatefulComponent, + } +} + +func GetStatefulSetRollingUpgradeFuncs() RollingUpgradeFuncs { + return RollingUpgradeFuncs{ + GetPodsFunc: getStatefulSetPods, + RestartContainerFunc: commonStopContainerWithPod, + OnlineUpdatePodFunc: commonOnlineUpdateWithPod, + RestartComponent: restartStatefulComponent, + } +} + +func GetReplicationRollingUpgradeFuncs() RollingUpgradeFuncs { return RollingUpgradeFuncs{ - GetPodsFunc: getRSMPods, + GetPodsFunc: getReplicationSetPods, RestartContainerFunc: commonStopContainerWithPod, OnlineUpdatePodFunc: commonOnlineUpdateWithPod, RestartComponent: restartStatefulComponent, diff --git a/internal/common/stateful_set_utils.go b/internal/common/stateful_set_utils.go index e3779cb2ce0..da13b3dcb76 100644 --- a/internal/common/stateful_set_utils.go +++ b/internal/common/stateful_set_utils.go @@ -93,14 +93,19 @@ func ParseParentNameAndOrdinal(s string) (string, int32) { // GetPodListByStatefulSet gets statefulSet pod list. func GetPodListByStatefulSet(ctx context.Context, cli client.Client, stsObj *appsv1.StatefulSet) ([]corev1.Pod, error) { - podList := &corev1.PodList{} selector, err := metav1.LabelSelectorAsMap(stsObj.Spec.Selector) if err != nil { return nil, err } + return GetPodListByStatefulSetWithSelector(ctx, cli, stsObj, selector) +} + +// GetPodListByStatefulSetWithSelector gets statefulSet pod list. 
+func GetPodListByStatefulSetWithSelector(ctx context.Context, cli client.Client, stsObj *appsv1.StatefulSet, selector client.MatchingLabels) ([]corev1.Pod, error) { + podList := &corev1.PodList{} if err := cli.List(ctx, podList, &client.ListOptions{Namespace: stsObj.Namespace}, - client.MatchingLabels(selector)); err != nil { + selector); err != nil { return nil, err } var pods []corev1.Pod From 96a6889ecd4aa1e4b9465c110981ef34f59fafc1 Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:30:22 +0800 Subject: [PATCH 16/58] chore: remove depends cv/cd (#5211) (#5215) --- apis/apps/v1alpha1/configuration_types.go | 16 +-- apis/apps/v1alpha1/zz_generated.deepcopy.go | 5 + .../apps.kubeblocks.io_configurations.yaml | 108 ++++++++++++++++-- .../configuration/configuration_controller.go | 4 + .../apps/configuration/configuration_test.go | 12 +- .../apps/configuration/reconcile_task.go | 2 +- .../apps/operations/reconfigure_test.go | 6 +- .../apps.kubeblocks.io_configurations.yaml | 108 ++++++++++++++++-- .../builder/builder_configuration.go | 18 +-- .../builder/builder_configuration_test.go | 25 ++-- .../controller/configuration/operator_test.go | 2 - internal/controller/configuration/pipeline.go | 5 +- .../controller/configuration/pipeline_test.go | 2 - 13 files changed, 243 insertions(+), 70 deletions(-) diff --git a/apis/apps/v1alpha1/configuration_types.go b/apis/apps/v1alpha1/configuration_types.go index 3699434ad4d..272f0f0fd99 100644 --- a/apis/apps/v1alpha1/configuration_types.go +++ b/apis/apps/v1alpha1/configuration_types.go @@ -37,9 +37,13 @@ type ConfigurationItemDetail struct { // +optional Version string `json:"version,omitempty"` + // configSpec is used to set the configuration template. + // +optional + ConfigSpec *ComponentConfigSpec `json:"configSpec"` + // Specify the configuration template. 
// +optional - ImportTemplateRef *ConfigTemplateExtension `json:"importTemplateRef,omitempty"` + ImportTemplateRef *ConfigTemplateExtension `json:"importTemplateRef"` // configFileParams is used to set the parameters to be updated. // +optional @@ -61,16 +65,6 @@ type ConfigurationSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.clusterRef" ComponentName string `json:"componentName"` - // clusterDefRef referencing ClusterDefinition name. This is an immutable attribute. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - ClusterDefRef string `json:"clusterDefRef"` - - // clusterVerRef referencing ClusterVersion name. - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - // +optional - ClusterVersionRef string `json:"clusterVerRef,omitempty"` - // customConfigurationItems describes user-defined config template. // +optional // +patchMergeKey=name diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 780fd1369b0..d4a7a5d3d03 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -1974,6 +1974,11 @@ func (in *ConfigurationItem) DeepCopy() *ConfigurationItem { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConfigurationItemDetail) DeepCopyInto(out *ConfigurationItemDetail) { *out = *in + if in.ConfigSpec != nil { + in, out := &in.ConfigSpec, &out.ConfigSpec + *out = new(ComponentConfigSpec) + (*in).DeepCopyInto(*out) + } if in.ImportTemplateRef != nil { in, out := &in.ImportTemplateRef, &out.ImportTemplateRef *out = new(ConfigTemplateExtension) diff --git a/config/crd/bases/apps.kubeblocks.io_configurations.yaml b/config/crd/bases/apps.kubeblocks.io_configurations.yaml index 6cc4adc83b3..758fde16d99 100644 --- a/config/crd/bases/apps.kubeblocks.io_configurations.yaml +++ b/config/crd/bases/apps.kubeblocks.io_configurations.yaml @@ -36,21 +36,12 @@ spec: spec: description: ConfigurationSpec defines the desired state of Configuration properties: - clusterDefRef: - description: clusterDefRef referencing ClusterDefinition name. This - is an immutable attribute. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string clusterRef: description: clusterRef references Cluster name. type: string x-kubernetes-validations: - message: forbidden to update spec.clusterRef rule: self == oldSelf - clusterVerRef: - description: clusterVerRef referencing ClusterVersion name. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string componentName: description: componentName is cluster component name. type: string @@ -79,6 +70,104 @@ spec: description: configFileParams is used to set the parameters to be updated. type: object + configSpec: + description: configSpec is used to set the configuration template. + properties: + asEnvFrom: + description: 'asEnvFrom is optional: the list of containers + will be injected into EnvFrom.' + items: + type: string + type: array + x-kubernetes-list-type: set + constraintRef: + description: Specify the name of the referenced the configuration + constraints object. 
+ maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + keys: + description: Specify a list of keys. If empty, ConfigConstraint + takes effect for all keys in configmap. + items: + type: string + type: array + x-kubernetes-list-type: set + legacyRenderedConfigSpec: + description: 'lazyRenderedConfigSpec is optional: specify + the secondary rendered config spec.' + properties: + namespace: + default: default + description: Specify the namespace of the referenced + the configuration template ConfigMap object. An empty + namespace is equivalent to the "default" namespace. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + type: string + policy: + default: none + description: policy defines how to merge external imported + templates into component templates. + enum: + - patch + - replace + - none + type: string + templateRef: + description: Specify the name of the referenced the + configuration template ConfigMap object. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + required: + - templateRef + type: object + name: + description: Specify the name of configuration template. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + namespace: + default: default + description: Specify the namespace of the referenced the + configuration template ConfigMap object. An empty namespace + is equivalent to the "default" namespace. 
+ maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + type: string + templateRef: + description: Specify the name of the referenced the configuration + template ConfigMap object. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + volumeName: + description: volumeName is the volume name of PodTemplate, + which the configuration file produced through the configuration + template will be mounted to the corresponding volume. + Must be a DNS_LABEL name. The volume name must be defined + in podSpec.containers[*].volumeMounts. + maxLength: 63 + pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ + type: string + required: + - name + - templateRef + - volumeName + type: object importTemplateRef: description: Specify the configuration template. properties: @@ -124,7 +213,6 @@ spec: - name x-kubernetes-list-type: map required: - - clusterDefRef - clusterRef - componentName type: object diff --git a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 785d4c3f5fe..3b82786427d 100644 --- a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -177,6 +177,10 @@ func (r *ConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { } func fromItemStatus(ctx intctrlutil.RequestCtx, status *appsv1alpha1.ConfigurationStatus, item appsv1alpha1.ConfigurationItemDetail) *appsv1alpha1.ConfigurationItemDetailStatus { + if item.ConfigSpec == nil { + ctx.Log.WithName(item.Name).Error(core.MakeError("configSpec phase is not ready and pass: %v", item), "") + return nil + } for i := range status.ConfigurationItemStatus { itemStatus := &status.ConfigurationItemStatus[i] switch { diff --git a/controllers/apps/configuration/configuration_test.go b/controllers/apps/configuration/configuration_test.go index bd711a76826..8bb5bbe0548 100644 --- a/controllers/apps/configuration/configuration_test.go +++ 
b/controllers/apps/configuration/configuration_test.go @@ -81,9 +81,15 @@ func mockConfigResource() (*corev1.ConfigMap, *appsv1alpha1.ConfigConstraint) { configuration := builder.NewConfigurationBuilder(testCtx.DefaultNamespace, core.GenerateComponentConfigurationName(clusterName, statefulCompName)). ClusterRef(clusterName). Component(statefulCompName). - ClusterDefRef(clusterDefName). - ClusterVerRef(clusterVersionName). - AddConfigurationItem(configSpecName). + AddConfigurationItem(appsv1alpha1.ComponentConfigSpec{ + ComponentTemplateSpec: appsv1alpha1.ComponentTemplateSpec{ + Name: configSpecName, + TemplateRef: configmap.Name, + Namespace: configmap.Namespace, + VolumeName: configVolumeName, + }, + ConfigConstraintRef: constraint.Name, + }). GetObject() Expect(testCtx.CreateObj(testCtx.Ctx, configuration)).Should(Succeed()) diff --git a/controllers/apps/configuration/reconcile_task.go b/controllers/apps/configuration/reconcile_task.go index 874cdc28096..f511bb2eeb8 100644 --- a/controllers/apps/configuration/reconcile_task.go +++ b/controllers/apps/configuration/reconcile_task.go @@ -44,7 +44,7 @@ func NewTask(item appsv1alpha1.ConfigurationItemDetail, status *appsv1alpha1.Con Name: item.Name, Status: status, Do: func(fetcher *Task, synComponent *component.SynthesizedComponent, revision string) error { - configSpec := component.GetConfigSpecByName(synComponent, item.Name) + configSpec := item.ConfigSpec if configSpec == nil { return core.MakeError("not found config spec: %s", item.Name) } diff --git a/controllers/apps/operations/reconfigure_test.go b/controllers/apps/operations/reconfigure_test.go index 5acb8242783..c005fb6d732 100644 --- a/controllers/apps/operations/reconfigure_test.go +++ b/controllers/apps/operations/reconfigure_test.go @@ -94,9 +94,7 @@ var _ = Describe("Reconfigure OpsRequest", func() { var cmObj *corev1.ConfigMap configuration := builder.NewConfigurationBuilder(testCtx.DefaultNamespace, 
core.GenerateComponentConfigurationName(clusterName, componentName)). ClusterRef(clusterName). - Component(componentName). - ClusterDefRef(clusterDefinitionName). - ClusterVerRef(clusterVersionName) + Component(componentName) for _, configSpec := range cdComponent.ConfigSpecs { cmInsName := core.GetComponentCfgName(clusterName, componentName, configSpec.Name) cfgCM := testapps.NewCustomizedObj("operations_config/config-template.yaml", @@ -112,7 +110,7 @@ var _ = Describe("Reconfigure OpsRequest", func() { constant.CMConfigurationTypeLabelKey, constant.ConfigInstanceType, ), ) - configuration.AddConfigurationItem(configSpec.Name) + configuration.AddConfigurationItem(configSpec) Expect(testCtx.CheckedCreateObj(ctx, cfgCM)).Should(Succeed()) cmObj = cfgCM } diff --git a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml index 6cc4adc83b3..758fde16d99 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml @@ -36,21 +36,12 @@ spec: spec: description: ConfigurationSpec defines the desired state of Configuration properties: - clusterDefRef: - description: clusterDefRef referencing ClusterDefinition name. This - is an immutable attribute. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string clusterRef: description: clusterRef references Cluster name. type: string x-kubernetes-validations: - message: forbidden to update spec.clusterRef rule: self == oldSelf - clusterVerRef: - description: clusterVerRef referencing ClusterVersion name. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string componentName: description: componentName is cluster component name. type: string @@ -79,6 +70,104 @@ spec: description: configFileParams is used to set the parameters to be updated. type: object + configSpec: + description: configSpec is used to set the configuration template. 
+ properties: + asEnvFrom: + description: 'asEnvFrom is optional: the list of containers + will be injected into EnvFrom.' + items: + type: string + type: array + x-kubernetes-list-type: set + constraintRef: + description: Specify the name of the referenced the configuration + constraints object. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + keys: + description: Specify a list of keys. If empty, ConfigConstraint + takes effect for all keys in configmap. + items: + type: string + type: array + x-kubernetes-list-type: set + legacyRenderedConfigSpec: + description: 'lazyRenderedConfigSpec is optional: specify + the secondary rendered config spec.' + properties: + namespace: + default: default + description: Specify the namespace of the referenced + the configuration template ConfigMap object. An empty + namespace is equivalent to the "default" namespace. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + type: string + policy: + default: none + description: policy defines how to merge external imported + templates into component templates. + enum: + - patch + - replace + - none + type: string + templateRef: + description: Specify the name of the referenced the + configuration template ConfigMap object. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + required: + - templateRef + type: object + name: + description: Specify the name of configuration template. 
+ maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + namespace: + default: default + description: Specify the namespace of the referenced the + configuration template ConfigMap object. An empty namespace + is equivalent to the "default" namespace. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + type: string + templateRef: + description: Specify the name of the referenced the configuration + template ConfigMap object. + maxLength: 63 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + volumeName: + description: volumeName is the volume name of PodTemplate, + which the configuration file produced through the configuration + template will be mounted to the corresponding volume. + Must be a DNS_LABEL name. The volume name must be defined + in podSpec.containers[*].volumeMounts. + maxLength: 63 + pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ + type: string + required: + - name + - templateRef + - volumeName + type: object importTemplateRef: description: Specify the configuration template. 
properties: @@ -124,7 +213,6 @@ spec: - name x-kubernetes-list-type: map required: - - clusterDefRef - clusterRef - componentName type: object diff --git a/internal/controller/builder/builder_configuration.go b/internal/controller/builder/builder_configuration.go index d7ac8acb893..b065231fe16 100644 --- a/internal/controller/builder/builder_configuration.go +++ b/internal/controller/builder/builder_configuration.go @@ -43,17 +43,11 @@ func (c *ConfigurationBuilder) Component(component string) *ConfigurationBuilder return c } -func (c *ConfigurationBuilder) ClusterVerRef(clusterVer string) *ConfigurationBuilder { - c.get().Spec.ClusterVersionRef = clusterVer - return c -} - -func (c *ConfigurationBuilder) ClusterDefRef(clusterDef string) *ConfigurationBuilder { - c.get().Spec.ClusterDefRef = clusterDef - return c -} - -func (c *ConfigurationBuilder) AddConfigurationItem(name string) *ConfigurationBuilder { - c.get().Spec.ConfigItemDetails = append(c.get().Spec.ConfigItemDetails, v1alpha1.ConfigurationItemDetail{Name: name}) +func (c *ConfigurationBuilder) AddConfigurationItem(configSpec v1alpha1.ComponentConfigSpec) *ConfigurationBuilder { + c.get().Spec.ConfigItemDetails = append(c.get().Spec.ConfigItemDetails, + v1alpha1.ConfigurationItemDetail{ + Name: configSpec.Name, + ConfigSpec: configSpec.DeepCopy(), + }) return c } diff --git a/internal/controller/builder/builder_configuration_test.go b/internal/controller/builder/builder_configuration_test.go index a6b6bede42f..ac1f2fba2d2 100644 --- a/internal/controller/builder/builder_configuration_test.go +++ b/internal/controller/builder/builder_configuration_test.go @@ -23,33 +23,36 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" ) var _ = Describe("configuration builder", func() { It("should work well", func() { const ( - clusterName = "test" - componentName = "mysql" - clusterDefName = "mysql-cd" - clusterVerName = "mysql-cv" - ns = "default" + clusterName = "test" + componentName = "mysql" + ns = "default" ) name := core.GenerateComponentConfigurationName(clusterName, componentName) config := NewConfigurationBuilder(ns, name). ClusterRef(clusterName). Component(componentName). - ClusterDefRef(clusterDefName). - ClusterVerRef(clusterVerName). - AddConfigurationItem("mysql-config"). - AddConfigurationItem("mysql-oteld-config"). + AddConfigurationItem(v1alpha1.ComponentConfigSpec{ + ComponentTemplateSpec: v1alpha1.ComponentTemplateSpec{ + Name: "mysql-config", + }, + }). + AddConfigurationItem(v1alpha1.ComponentConfigSpec{ + ComponentTemplateSpec: v1alpha1.ComponentTemplateSpec{ + Name: "mysql-oteld-config", + }, + }). GetObject() Expect(config.Name).Should(BeEquivalentTo(name)) Expect(config.Spec.ClusterRef).Should(BeEquivalentTo(clusterName)) Expect(config.Spec.ComponentName).Should(BeEquivalentTo(componentName)) - Expect(config.Spec.ClusterVersionRef).Should(BeEquivalentTo(clusterVerName)) - Expect(config.Spec.ClusterDefRef).Should(BeEquivalentTo(clusterDefName)) Expect(len(config.Spec.ConfigItemDetails)).Should(Equal(2)) }) }) diff --git a/internal/controller/configuration/operator_test.go b/internal/controller/configuration/operator_test.go index 05b286c13e4..7db807a94a0 100644 --- a/internal/controller/configuration/operator_test.go +++ b/internal/controller/configuration/operator_test.go @@ -92,8 +92,6 @@ var _ = Describe("ConfigurationOperatorTest", func() { cfgcore.GenerateComponentConfigurationName(clusterName, mysqlCompName)). ClusterRef(clusterName). Component(mysqlCompName). - ClusterVerRef(clusterVersionName). 
- ClusterDefRef(clusterDefName). GetObject() configConstraint = &appsv1alpha1.ConfigConstraint{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/controller/configuration/pipeline.go b/internal/controller/configuration/pipeline.go index c1dc0af389a..a64a6250d98 100644 --- a/internal/controller/configuration/pipeline.go +++ b/internal/controller/configuration/pipeline.go @@ -208,14 +208,11 @@ func (p *pipeline) createConfiguration() *appsv1alpha1.Configuration { builder := builder.NewConfigurationBuilder(p.Namespace, core.GenerateComponentConfigurationName(p.ClusterName, p.ComponentName), ) - for _, template := range p.ctx.Component.ConfigTemplates { - builder.AddConfigurationItem(template.Name) + builder.AddConfigurationItem(template) } return builder.Component(p.ComponentName). ClusterRef(p.ClusterName). - ClusterDefRef(p.ctx.Cluster.Spec.ClusterDefRef). - ClusterVerRef(p.ctx.Cluster.Spec.ClusterVersionRef). GetObject() } diff --git a/internal/controller/configuration/pipeline_test.go b/internal/controller/configuration/pipeline_test.go index 3f2b8475020..89d935269ab 100644 --- a/internal/controller/configuration/pipeline_test.go +++ b/internal/controller/configuration/pipeline_test.go @@ -124,8 +124,6 @@ max_connections = '1000' cfgcore.GenerateComponentConfigurationName(clusterName, mysqlCompName)). ClusterRef(clusterName). Component(mysqlCompName). - ClusterVerRef(clusterVersionName). - ClusterDefRef(clusterDefName). 
GetObject() configConstraint = &appsv1alpha1.ConfigConstraint{ ObjectMeta: metav1.ObjectMeta{ From b706238312da472f35536a0696c4de46ba0f34f9 Mon Sep 17 00:00:00 2001 From: Dereck Chen <92903435+derecknowayback@users.noreply.github.com> Date: Fri, 22 Sep 2023 11:04:20 +0800 Subject: [PATCH 17/58] chore: preflight support aws us region (#5204) --- internal/cli/cmd/kubeblocks/data/eks_hostpreflight.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/cli/cmd/kubeblocks/data/eks_hostpreflight.yaml b/internal/cli/cmd/kubeblocks/data/eks_hostpreflight.yaml index ab382e8c301..09b04927af4 100644 --- a/internal/cli/cmd/kubeblocks/data/eks_hostpreflight.yaml +++ b/internal/cli/cmd/kubeblocks/data/eks_hostpreflight.yaml @@ -36,8 +36,12 @@ spec: regionNames: - cn-northwest-1 - cn-north-1 + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 outcomes: - warn: - message: k8s cluster region doesn't belong to amazon china + message: k8s cluster region doesn't belong to amazon china, be aware of the network reachability for cluster regions that located at East-Asia - pass: - message: k8s cluster region belongs to amazon china \ No newline at end of file + message: k8s cluster region belongs to amazon china/us \ No newline at end of file From cd71dfc326bab1c2be5be6b39a26e2d1f6a1dd64 Mon Sep 17 00:00:00 2001 From: yijing Date: Fri, 22 Sep 2023 14:54:32 +0800 Subject: [PATCH 18/58] chore: Add test kbcli cases (#5231) --- .github/workflows/e2e-kbcli.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/e2e-kbcli.yml b/.github/workflows/e2e-kbcli.yml index 06e4630dbb1..0528bc9728d 100644 --- a/.github/workflows/e2e-kbcli.yml +++ b/.github/workflows/e2e-kbcli.yml @@ -12,7 +12,7 @@ on: required: false default: '' TEST_TYPE: - description: 'test type (e.g. 
mysql|postgres|redis|mongo|kafka|pulsar|weaviate|qdrant|smarte|scale|greptimedb|nebula|risingwave|starrocks|oceanbase|foxlake|oracle-mysql)' + description: 'test type (e.g. mysql|postgres|redis|mongo|kafka|pulsar|weaviate|qdrant|smarte|scale|greptimedb|nebula|risingwave|starrocks|oceanbase|foxlake|oracle-mysql|asmysql|openldap)' required: false default: '' CLOUD_PROVIDER: @@ -87,7 +87,7 @@ jobs: e2e: name: ${{ inputs.CLOUD_PROVIDER }} needs: check - uses: apecloud/apecloud-cd/.github/workflows/kbcli-test-k8s.yml@v0.1.25 + uses: apecloud/apecloud-cd/.github/workflows/kbcli-test-k8s.yml@main with: CLOUD_PROVIDER: "${{ inputs.CLOUD_PROVIDER }}" KB_VERSION: "${{ needs.check.outputs.release-version }}" @@ -96,7 +96,7 @@ jobs: INSTANCE_TYPE: "${{ inputs.INSTANCE_TYPE }}" REGION: "${{ needs.check.outputs.cluster-region }}" BRANCH_NAME: "${{ inputs.BRANCH_NAME }}" - APECD_REF: "v0.1.25" + APECD_REF: "main" ARGS: "${{ inputs.ARGS }}" TEST_TYPE: "${{ inputs.TEST_TYPE }}" secrets: inherit From e4b1e69989a6577efa4334ea05541020f29a97a4 Mon Sep 17 00:00:00 2001 From: Lek Date: Fri, 22 Sep 2023 17:25:14 +0800 Subject: [PATCH 19/58] fix: add special handling for isPodReady (#5230) --- lorry/highavailability/ha.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lorry/highavailability/ha.go b/lorry/highavailability/ha.go index 6f3a2761412..b8b05fbb9d7 100644 --- a/lorry/highavailability/ha.go +++ b/lorry/highavailability/ha.go @@ -413,6 +413,12 @@ func (ha *Ha) IsPodReady() (bool, error) { pinger.Interval = 500 * time.Millisecond err = pinger.Run() if err != nil { + // For container runtimes like Containerd, unprivileged users can't send icmp echo packets. + // As a temporary workaround, special handling is being implemented to bypass this limitation. 
+ if strings.Contains(err.Error(), "socket: permission denied") { + ha.logger.Info("ping failed, socket: permission denied, but temporarily return true") + return true, nil + } ha.logger.Error(err, fmt.Sprintf("ping domain:%s failed", domain)) return false, err } From 40feafed69cd75e1274c8edc95661ac433220df8 Mon Sep 17 00:00:00 2001 From: free6om Date: Sat, 23 Sep 2023 15:05:29 +0800 Subject: [PATCH 20/58] fix: recover missing lorry env (#5235) --- internal/constant/const.go | 3 +- internal/controller/component/probe_utils.go | 39 ++++---- .../controller/component/probe_utils_test.go | 2 +- .../rsm/transformer_object_generation.go | 89 +++++++++++++------ internal/controller/rsm/types.go | 5 +- lorry/binding/base.go | 2 +- 6 files changed, 92 insertions(+), 48 deletions(-) diff --git a/internal/constant/const.go b/internal/constant/const.go index a7a53b07d62..8ba749717ab 100644 --- a/internal/constant/const.go +++ b/internal/constant/const.go @@ -64,7 +64,6 @@ const ( const ( KBToolsImage = "KUBEBLOCKS_TOOLS_IMAGE" - KBProbeImage = "KUBEBLOCKS_PROBE_IMAGE" KBImagePullPolicy = "KUBEBLOCKS_IMAGE_PULL_POLICY" KBDataScriptClientsImage = "KUBEBLOCKS_DATASCRIPT_CLIENTS_IMAGE" ) @@ -230,7 +229,7 @@ const ( ProbeHTTPPortName = "probe-http-port" ProbeGRPCPortName = "probe-grpc-port" ProbeInitContainerName = "kb-initprobe" - LorryContainerName = "kb-lorry" + WeSyncerContainerName = "kb-we-syncer" StatusProbeContainerName = "kb-checkstatus" RunningProbeContainerName = "kb-checkrunning" VolumeProtectionProbeContainerName = "kb-volume-protection" diff --git a/internal/controller/component/probe_utils.go b/internal/controller/component/probe_utils.go index df1425429ea..e9e6b2a2c6c 100644 --- a/internal/controller/component/probe_utils.go +++ b/internal/controller/component/probe_utils.go @@ -53,8 +53,8 @@ var ( ) func buildLorryContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedComponent) error { - container := buildLorryContainer() - lorryContainers := 
[]corev1.Container{} + container := buildBasicContainer() + var lorryContainers []corev1.Container componentLorry := component.Probes if componentLorry == nil { return nil @@ -95,17 +95,23 @@ func buildLorryContainers(reqCtx intctrlutil.RequestCtx, component *SynthesizedC lorryContainers = append(lorryContainers, *c) } - if len(lorryContainers) >= 1 { - container := &lorryContainers[0] - buildLorryServiceContainer(component, container, int(lorrySvcHTTPPort), int(lorrySvcGRPCPort)) + // inject WeSyncer(currently part of Lorry) in cluster controller. + // as all the above features share the lorry service, only one lorry need to be injected. + // if none of the above feature enabled, WeSyncer still need to be injected for the HA feature functions well. + if len(lorryContainers) == 0 { + weSyncerContainer := container.DeepCopy() + buildWeSyncerContainer(weSyncerContainer, int(lorrySvcHTTPPort)) + lorryContainers = append(lorryContainers, *weSyncerContainer) } + buildLorryServiceContainer(component, &lorryContainers[0], int(lorrySvcHTTPPort), int(lorrySvcGRPCPort)) + reqCtx.Log.V(1).Info("lorry", "containers", lorryContainers) component.PodSpec.Containers = append(component.PodSpec.Containers, lorryContainers...) return nil } -func buildLorryContainer() *corev1.Container { +func buildBasicContainer() *corev1.Container { return builder.NewContainerBuilder("string"). SetImage("registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"). SetImagePullPolicy(corev1.PullIfNotPresent). @@ -127,10 +133,6 @@ func buildLorryContainer() *corev1.Container { }, }, }). - SetReadinessProbe(corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{Command: []string{}}, - }}). 
SetStartupProbe(corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(3501)}, @@ -206,33 +208,38 @@ func buildLorryServiceContainer(component *SynthesizedComponent, container *core } } +func buildWeSyncerContainer(weSyncerContainer *corev1.Container, probeSvcHTTPPort int) { + weSyncerContainer.Name = constant.WeSyncerContainerName + weSyncerContainer.StartupProbe.TCPSocket.Port = intstr.FromInt(probeSvcHTTPPort) +} + func buildStatusProbeContainer(characterType string, statusProbeContainer *corev1.Container, probeSetting *appsv1alpha1.ClusterDefinitionProbe, probeSvcHTTPPort int) { statusProbeContainer.Name = constant.StatusProbeContainerName - probe := statusProbeContainer.ReadinessProbe + probe := &corev1.Probe{} httpGet := &corev1.HTTPGetAction{} httpGet.Path = fmt.Sprintf(checkStatusURIFormat, characterType) httpGet.Port = intstr.FromInt(probeSvcHTTPPort) - probe.Exec = nil probe.HTTPGet = httpGet probe.PeriodSeconds = probeSetting.PeriodSeconds probe.TimeoutSeconds = probeSetting.TimeoutSeconds probe.FailureThreshold = probeSetting.FailureThreshold + statusProbeContainer.ReadinessProbe = probe statusProbeContainer.StartupProbe.TCPSocket.Port = intstr.FromInt(probeSvcHTTPPort) } func buildRunningProbeContainer(characterType string, runningProbeContainer *corev1.Container, probeSetting *appsv1alpha1.ClusterDefinitionProbe, probeSvcHTTPPort int) { runningProbeContainer.Name = constant.RunningProbeContainerName - probe := runningProbeContainer.ReadinessProbe + probe := &corev1.Probe{} httpGet := &corev1.HTTPGetAction{} httpGet.Path = fmt.Sprintf(checkRunningURIFormat, characterType) httpGet.Port = intstr.FromInt(probeSvcHTTPPort) - probe.Exec = nil probe.HTTPGet = httpGet probe.PeriodSeconds = probeSetting.PeriodSeconds probe.TimeoutSeconds = probeSetting.TimeoutSeconds probe.FailureThreshold = probeSetting.FailureThreshold + runningProbeContainer.ReadinessProbe = probe 
runningProbeContainer.StartupProbe.TCPSocket.Port = intstr.FromInt(probeSvcHTTPPort) } @@ -242,15 +249,15 @@ func volumeProtectionEnabled(component *SynthesizedComponent) bool { func buildVolumeProtectionProbeContainer(characterType string, c *corev1.Container, probeSvcHTTPPort int) { c.Name = constant.VolumeProtectionProbeContainerName - probe := c.ReadinessProbe + probe := &corev1.Probe{} httpGet := &corev1.HTTPGetAction{} httpGet.Path = fmt.Sprintf(volumeProtectionURIFormat, characterType) httpGet.Port = intstr.FromInt(probeSvcHTTPPort) - probe.Exec = nil probe.HTTPGet = httpGet probe.PeriodSeconds = defaultVolumeProtectionProbe.PeriodSeconds probe.TimeoutSeconds = defaultVolumeProtectionProbe.TimeoutSeconds probe.FailureThreshold = defaultVolumeProtectionProbe.FailureThreshold + c.ReadinessProbe = probe c.StartupProbe.TCPSocket.Port = intstr.FromInt(probeSvcHTTPPort) } diff --git a/internal/controller/component/probe_utils_test.go b/internal/controller/component/probe_utils_test.go index 9b6b7a67e4a..ed022cd874b 100644 --- a/internal/controller/component/probe_utils_test.go +++ b/internal/controller/component/probe_utils_test.go @@ -44,7 +44,7 @@ var _ = Describe("probe_utils", func() { var clusterDefProbe *appsv1alpha1.ClusterDefinitionProbe BeforeEach(func() { - container = buildLorryContainer() + container = buildBasicContainer() probeServiceHTTPPort, probeServiceGrpcPort = 3501, 50001 clusterDefProbe = &appsv1alpha1.ClusterDefinitionProbe{} diff --git a/internal/controller/rsm/transformer_object_generation.go b/internal/controller/rsm/transformer_object_generation.go index f7403abc923..bf8ae37f1cf 100644 --- a/internal/controller/rsm/transformer_object_generation.go +++ b/internal/controller/rsm/transformer_object_generation.go @@ -27,6 +27,7 @@ import ( "strings" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -479,38 +480,74 @@ func 
injectRoleProbeAgentContainer(rsm workloads.ReplicatedStateMachine, templat }, ) + readinessProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: roleProbeURI, + Port: intstr.FromInt(probeDaemonPort), + }, + }, + InitialDelaySeconds: roleProbe.InitialDelaySeconds, + TimeoutSeconds: roleProbe.TimeoutSeconds, + PeriodSeconds: roleProbe.PeriodSeconds, + SuccessThreshold: roleProbe.SuccessThreshold, + FailureThreshold: roleProbe.FailureThreshold, + } + + tryToGetRoleProbeContainer := func() *corev1.Container { + for i, container := range template.Spec.Containers { + if container.Image != image { + continue + } + if len(container.Command) == 0 || container.Command[0] != roleProbeBinaryName { + continue + } + if container.ReadinessProbe != nil { + continue + } + // if all the above conditions satisfied, container that can do the role probe job found + return &template.Spec.Containers[i] + } + return nil + } + // if role probe container exists, update the readiness probe, env and serving container port + if container := tryToGetRoleProbeContainer(); container != nil { + // presume the first port is the http port. + // this is an easily broken contract between rsm controller and cluster controller. + // TODO(free6om): design a better way to do this after Lorry-WeSyncer separation done + readinessProbe.HTTPGet.Port = intstr.FromInt(int(container.Ports[0].ContainerPort)) + container.ReadinessProbe = readinessProbe + for _, e := range env { + if slices.IndexFunc(container.Env, func(v corev1.EnvVar) bool { + return v.Name == e.Name + }) >= 0 { + continue + } + container.Env = append(container.Env, e) + } + return + } + + // if role probe container doesn't exist, create a new one // build container - container := corev1.Container{ - Name: roleProbeName, - Image: image, - ImagePullPolicy: "IfNotPresent", - Command: []string{ - "lorry", + container := builder.NewContainerBuilder(roleProbeContainerName). + SetImage(image). 
+ SetImagePullPolicy(corev1.PullIfNotPresent). + AddCommands([]string{ + roleProbeBinaryName, "--port", strconv.Itoa(probeDaemonPort), - }, - Ports: []corev1.ContainerPort{{ + }...). + AddEnv(env...). + AddPorts(corev1.ContainerPort{ ContainerPort: int32(probeDaemonPort), - Name: roleProbeName, + Name: roleProbeContainerName, Protocol: "TCP", - }}, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: roleProbeURI, - Port: intstr.FromInt(probeDaemonPort), - }, - }, - InitialDelaySeconds: roleProbe.InitialDelaySeconds, - TimeoutSeconds: roleProbe.TimeoutSeconds, - PeriodSeconds: roleProbe.PeriodSeconds, - SuccessThreshold: roleProbe.SuccessThreshold, - FailureThreshold: roleProbe.FailureThreshold, - }, - Env: env, - } + }). + SetReadinessProbe(*readinessProbe). + GetObject() // inject role probe container - template.Spec.Containers = append(template.Spec.Containers, container) + template.Spec.Containers = append(template.Spec.Containers, *container) } func injectProbeActionContainer(rsm workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec, actionSvcPorts []int32, credentialEnv []corev1.EnvVar) { diff --git a/internal/controller/rsm/types.go b/internal/controller/rsm/types.go index aaf799b83d2..a7f5aaa6f0e 100644 --- a/internal/controller/rsm/types.go +++ b/internal/controller/rsm/types.go @@ -69,7 +69,8 @@ const ( jobScenarioMembership = "membership-reconfiguration" jobScenarioUpdate = "pod-update" - roleProbeName = "kb-role-probe" + roleProbeContainerName = "kb-role-probe" + roleProbeBinaryName = "lorry" roleAgentVolumeName = "role-agent" roleAgentInstallerName = "role-agent-installer" roleAgentVolumeMountPath = "/role-probe" @@ -89,7 +90,7 @@ const ( targetHostVarName = "KB_RSM_TARGET_HOST" RoleUpdateMechanismVarName = "KB_RSM_ROLE_UPDATE_MECHANISM" directAPIServerEventFieldPath = "spec.containers{sqlchannel}" - readinessProbeEventFieldPath = "spec.containers{" + roleProbeName + "}" + 
readinessProbeEventFieldPath = "spec.containers{" + roleProbeContainerName + "}" legacyEventFieldPath = "spec.containers{kb-checkrole}" checkRoleEventReason = "checkRole" diff --git a/lorry/binding/base.go b/lorry/binding/base.go index d28d3465ac0..5652b7dabb5 100644 --- a/lorry/binding/base.go +++ b/lorry/binding/base.go @@ -218,7 +218,7 @@ func (ops *BaseOperations) CheckRoleOps(ctx context.Context, req *ProbeRequest, } // sql exec timeout needs to be less than httpget's timeout which by default 1s. - ctx1, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + ctx1, cancel := context.WithTimeout(ctx, 999*time.Millisecond) defer cancel() role, err := ops.GetRole(ctx1, req, resp) if err != nil { From d516d6b25dfc928206810821adda13b03caebb0c Mon Sep 17 00:00:00 2001 From: "Jerry.Zhou" <89451328+kissycn@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:18:55 +0800 Subject: [PATCH 21/58] fix: fix openldap VolumeExpansion ops failed (#5236) --- deploy/openldap-cluster/templates/cluster.yaml | 2 +- deploy/openldap-cluster/values.yaml | 2 +- deploy/openldap/templates/clusterdefinition.yaml | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy/openldap-cluster/templates/cluster.yaml b/deploy/openldap-cluster/templates/cluster.yaml index abfd9d351e4..f13b5b8b097 100644 --- a/deploy/openldap-cluster/templates/cluster.yaml +++ b/deploy/openldap-cluster/templates/cluster.yaml @@ -35,7 +35,7 @@ spec: {{- end }} {{- if .Values.persistence.enabled }} volumeClaimTemplates: - - name: data # ref clusterdefinition components.containers.volumeMounts.name + - name: openldap # ref clusterdefinition components.containers.volumeMounts.name spec: storageClassName: {{ .Values.persistence.data.storageClassName }} accessModes: diff --git a/deploy/openldap-cluster/values.yaml b/deploy/openldap-cluster/values.yaml index 7a545409932..f0224d42356 100644 --- a/deploy/openldap-cluster/values.yaml +++ b/deploy/openldap-cluster/values.yaml @@ -46,7 +46,7 @@ tolerations: 
[] persistence: ## @param shard[*].persistence.enabled Enable persistence using Persistent Volume Claims ## - enabled: false + enabled: true ## `data` volume settings ## data: diff --git a/deploy/openldap/templates/clusterdefinition.yaml b/deploy/openldap/templates/clusterdefinition.yaml index 86fc0d847e8..95079821c5e 100644 --- a/deploy/openldap/templates/clusterdefinition.yaml +++ b/deploy/openldap/templates/clusterdefinition.yaml @@ -29,8 +29,6 @@ spec: - mountPath: /etc/ldap/slapd.d name: openldap subPath: ldap-config - - mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom - name: openldap-bootstrap ports: - containerPort: 389 name: ldap From 2878dedece8b251dd0e12df94746f4aa6d07ed9c Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:50:48 +0800 Subject: [PATCH 22/58] feat: tplengine support customized syntax (#5210) (#5217) --- internal/gotemplate/functional_test.go | 167 +++++++++++++++++++++++++ internal/gotemplate/tpl_engine.go | 39 +++++- internal/gotemplate/tpl_engine_test.go | 38 ++++++ 3 files changed, 243 insertions(+), 1 deletion(-) create mode 100644 internal/gotemplate/functional_test.go diff --git a/internal/gotemplate/functional_test.go b/internal/gotemplate/functional_test.go new file mode 100644 index 00000000000..0f39ada1083 --- /dev/null +++ b/internal/gotemplate/functional_test.go @@ -0,0 +1,167 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package gotemplate + +import ( + "reflect" + "testing" +) + +func Test_regexStringSubmatch(t *testing.T) { + type args struct { + regex string + s string + } + tests := []struct { + name string + args args + want []string + wantErr bool + }{{ + name: "test", + args: args{ + regex: `^(\d+)K$`, + s: "123K", + }, + want: []string{"123K", "123"}, + wantErr: false, + }, { + name: "test", + args: args{ + regex: `^(\d+)M$`, + s: "123", + }, + want: nil, + wantErr: false, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := regexStringSubmatch(tt.args.regex, tt.args.s) + if (err != nil) != tt.wantErr { + t.Errorf("regexStringSubmatch() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("regexStringSubmatch() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_fromYAML(t *testing.T) { + type args struct { + str string + } + tests := []struct { + name string + args args + want map[string]interface{} + wantErr bool + }{{ + name: "test", + args: args{ + str: ``, + }, + want: map[string]interface{}{}, + }, { + name: "test", + args: args{ + str: `efg`, + }, + want: map[string]interface{}{}, + wantErr: true, + }, { + name: "test", + args: args{ + str: `a: + b: "c" + c: "d" +`, + }, + want: map[string]interface{}{ + "a": map[interface{}]interface{}{ + "b": "c", + "c": "d", + }, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := fromYAML(tt.args.str) + if (err != nil) != tt.wantErr { + t.Errorf("fromYAML() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("fromYAML() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_fromYAMLArray(t *testing.T) { + type args struct { + str string + } + tests := 
[]struct { + name string + args args + want []interface{} + wantErr bool + }{{ + name: "test", + args: args{ + str: ``, + }, + want: nil, + }, { + name: "test", + args: args{ + str: `abc: efg`, + }, + wantErr: true, + }, { + name: "test", + args: args{ + str: ` +- a +- b +- c +`, + }, + want: []interface{}{ + "a", + "b", + "c", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := fromYAMLArray(tt.args.str) + if (err != nil) != tt.wantErr { + t.Errorf("fromYAMLArray() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("fromYAMLArray() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/gotemplate/tpl_engine.go b/internal/gotemplate/tpl_engine.go index 40cfe98dedb..6b73b713b00 100644 --- a/internal/gotemplate/tpl_engine.go +++ b/internal/gotemplate/tpl_engine.go @@ -74,6 +74,40 @@ type TplEngine struct { ctx context.Context } +type TplEngineOptions func(*TplEngine) + +type DSLType string + +const ( + DefaultDSL DSLType = "gotemplate" + KBDSL DSLType = "kbdsl" + KBDSL2 DSLType = "kbdsl2" + + TemplateBeginDelim = "{{" + TemplateEndDelim = "}}" + KBDSLBeginDelim = "{%" + KBDSLEndDelim = "%}" + KBDSL2BeginDelim = "<%" + KBDSL2EndDelim = "%>" +) + +func WithCustomizedSyntax(begin, end string) TplEngineOptions { + return func(t *TplEngine) { + t.tpl.Delims(begin, end) + } +} + +func WithCustomizedWithType(dsl DSLType) TplEngineOptions { + switch dsl { + case KBDSL: + return WithCustomizedSyntax(KBDSLBeginDelim, KBDSLEndDelim) + case KBDSL2: + return WithCustomizedSyntax(KBDSL2BeginDelim, KBDSL2EndDelim) + default: + return WithCustomizedSyntax(TemplateBeginDelim, TemplateEndDelim) + } +} + func (t *TplEngine) GetTplEngine() *template.Template { return t.tpl } @@ -180,7 +214,7 @@ func (t *TplEngine) importSelfModuleFuncs(funcs map[string]functional, fn func(t } // NewTplEngine creates go template helper -func NewTplEngine(values *TplValues, funcs 
*BuiltInObjectsFunc, tplName string, cli types2.ReadonlyClient, ctx context.Context) *TplEngine { +func NewTplEngine(values *TplValues, funcs *BuiltInObjectsFunc, tplName string, cli types2.ReadonlyClient, ctx context.Context, options ...TplEngineOptions) *TplEngine { coreBuiltinFuncs := sprig.TxtFuncMap() // custom funcs @@ -200,5 +234,8 @@ func NewTplEngine(values *TplValues, funcs *BuiltInObjectsFunc, tplName string, } engine.initSystemFunMap(coreBuiltinFuncs) + if len(options) > 0 { + options[0](&engine) + } return &engine } diff --git a/internal/gotemplate/tpl_engine_test.go b/internal/gotemplate/tpl_engine_test.go index a871bd0bc98..88596fdd325 100644 --- a/internal/gotemplate/tpl_engine_test.go +++ b/internal/gotemplate/tpl_engine_test.go @@ -240,4 +240,42 @@ mathAvg = [8-9][0-9]\.?\d*` }) }) + Context("customized syntax test", func() { + It("KB1 syntax", func() { + engine := NewTplEngine(&TplValues{}, nil, "for_test", nil, ctx, WithCustomizedWithType(KBDSL)) + r, err := engine.Render(KBDSLBeginDelim + ` snakecase "getUserName" ` + KBDSLEndDelim) + Expect(err).Should(Succeed()) + Expect("get_user_name").Should(BeEquivalentTo(r)) + }) + + It("KB2 syntax", func() { + engine := NewTplEngine(&TplValues{}, nil, "for_test", nil, ctx, WithCustomizedWithType(KBDSL2)) + r, err := engine.Render(KBDSL2BeginDelim + ` camelcase "get_user_name" ` + KBDSL2EndDelim) + Expect(err).Should(Succeed()) + Expect("GetUserName").Should(BeEquivalentTo(r)) + }) + + It("Default syntax", func() { + engine := NewTplEngine(&TplValues{}, nil, "for_test", nil, ctx, WithCustomizedWithType(DefaultDSL)) + r, err := engine.Render(TemplateBeginDelim + ` camelcase "get_user_name" ` + TemplateEndDelim) + Expect(err).Should(Succeed()) + Expect("GetUserName").Should(BeEquivalentTo(r)) + }) + + It("customized syntax", func() { + engine := NewTplEngine(&TplValues{}, nil, "for_test", nil, ctx, WithCustomizedSyntax("-------", "======")) + r, err := engine.Render(`------- camelcase "get_user_name" 
======`) + Expect(err).Should(Succeed()) + Expect("GetUserName").Should(BeEquivalentTo(r)) + }) + + It("default syntax", func() { + engine := NewTplEngine(&TplValues{}, nil, "for_test", nil, ctx) + r, _ := engine.Render(KBDSL2BeginDelim + ` snakecase "getUserName" ` + KBDSL2EndDelim) + Expect(`<% snakecase "getUserName" %>`).Should(BeEquivalentTo(r)) + r, _ = engine.Render(KBDSLBeginDelim + ` camelcase "get_user_name" ` + KBDSLEndDelim) + Expect(`{% camelcase "get_user_name" %}`).Should(BeEquivalentTo(r)) + }) + }) + }) From 52d9fcdfd46d7e0b47095b802ff649360b39d593 Mon Sep 17 00:00:00 2001 From: Leon Date: Mon, 25 Sep 2023 14:14:17 +0800 Subject: [PATCH 23/58] fix: check VolumeSnapshot for each cluster at h-scaling (#4967) --- controllers/apps/cluster_controller_test.go | 95 +++++++++------- .../components/hscale_volume_populator.go | 73 +++++++++--- .../apps/opsrequest_controller_test.go | 19 ++-- internal/testutil/k8s/storage_util.go | 107 ++++++++++++++++++ 4 files changed, 228 insertions(+), 66 deletions(-) diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 70617c21118..2e674c3430b 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -485,16 +485,19 @@ var _ = Describe("Cluster Controller", func() { return fmt.Sprintf("%s-%s-%s-%d", vctName, clusterKey.Name, compName, i) } - createPVC := func(clusterName, pvcName, compName string) { - testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, - compName, "data").SetStorage("1Gi").AddLabelsInMap(map[string]string{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - constant.AppManagedByLabelKey: constant.AppName, - }).CheckedCreate(&testCtx) + createPVC := func(clusterName, pvcName, compName, storageClassName string) { + testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, compName, "data"). 
+ AddLabelsInMap(map[string]string{ + constant.AppInstanceLabelKey: clusterName, + constant.KBAppComponentLabelKey: compName, + constant.AppManagedByLabelKey: constant.AppName, + }). + SetStorage("1Gi"). + SetStorageClass(storageClassName). + CheckedCreate(&testCtx) } - mockComponentPVCsBound := func(comp *appsv1alpha1.ClusterComponentSpec, replicas int, create bool) { + mockComponentPVCsBound := func(comp *appsv1alpha1.ClusterComponentSpec, replicas int, create bool, storageClassName string) { for i := 0; i < replicas; i++ { for _, vct := range comp.VolumeClaimTemplates { pvcKey := types.NamespacedName{ @@ -502,7 +505,7 @@ var _ = Describe("Cluster Controller", func() { Name: getPVCName(vct.Name, comp.Name, i), } if create { - createPVC(clusterKey.Name, pvcKey.Name, comp.Name) + createPVC(clusterKey.Name, pvcKey.Name, comp.Name, storageClassName) } Eventually(testapps.CheckObjExists(&testCtx, pvcKey, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) @@ -551,9 +554,10 @@ var _ = Describe("Cluster Controller", func() { return pods } - horizontalScaleComp := func(updatedReplicas int, comp *appsv1alpha1.ClusterComponentSpec, policy *appsv1alpha1.HorizontalScalePolicy) { + horizontalScaleComp := func(updatedReplicas int, comp *appsv1alpha1.ClusterComponentSpec, + storageClassName string, policy *appsv1alpha1.HorizontalScalePolicy) { By("Mocking component PVCs to bound") - mockComponentPVCsBound(comp, int(comp.Replicas), true) + mockComponentPVCsBound(comp, int(comp.Replicas), true, storageClassName) By("Checking rsm replicas right") rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) @@ -609,7 +613,7 @@ var _ = Describe("Cluster Controller", func() { backup.Status.BackupToolName = backupToolName })()).Should(Succeed()) - if viper.GetBool("VOLUMESNAPSHOT") { + if testk8s.IsMockVolumeSnapshotEnabled(&testCtx, storageClassName) { By("Mocking VolumeSnapshot and set it as ReadyToUse") pvcName := getPVCName(testapps.DataVolumeName, 
comp.Name, 0) volumeSnapshot := &snapshotv1.VolumeSnapshot{ @@ -642,8 +646,9 @@ var _ = Describe("Cluster Controller", func() { constant.KBAppComponentLabelKey: comp.Name, }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(updatedReplicas * len(comp.VolumeClaimTemplates))) + volumeSnapshotEnabled := testk8s.IsMockVolumeSnapshotEnabled(&testCtx, testk8s.DefaultStorageClassName) if policy != nil { - if !viper.GetBool("VOLUMESNAPSHOT") && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { + if !volumeSnapshotEnabled && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { By("Checking restore job created") Eventually(testapps.List(&testCtx, generics.JobSignature, client.MatchingLabels{ @@ -670,17 +675,17 @@ var _ = Describe("Cluster Controller", func() { } By("Mock PVCs status to bound") - mockComponentPVCsBound(comp, updatedReplicas, false) + mockComponentPVCsBound(comp, updatedReplicas, false, "") if policy != nil { - By("Checking backup job cleanup") + By("Checking backup cleanup") Eventually(testapps.List(&testCtx, generics.BackupSignature, client.MatchingLabels{ constant.AppInstanceLabelKey: clusterKey.Name, constant.KBAppComponentLabelKey: comp.Name, }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) - if !viper.GetBool("VOLUMESNAPSHOT") && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { + if !volumeSnapshotEnabled && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { By("Checking restore job cleanup") Eventually(testapps.List(&testCtx, generics.JobSignature, client.MatchingLabels{ @@ -842,7 +847,8 @@ var _ = Describe("Cluster Controller", func() { // @argument componentDefsWithHScalePolicy assign ClusterDefinition.spec.componentDefs[].horizontalScalePolicy for // the matching names. If not provided, will set 1st ClusterDefinition.spec.componentDefs[0].horizontalScalePolicy. 
- horizontalScale := func(updatedReplicas int, policyType appsv1alpha1.HScaleDataClonePolicyType, componentDefsWithHScalePolicy ...string) { + horizontalScale := func(updatedReplicas int, storageClassName string, + policyType appsv1alpha1.HScaleDataClonePolicyType, componentDefsWithHScalePolicy ...string) { defer lorry.UnsetMockClient() cluster := &appsv1alpha1.Cluster{} @@ -853,7 +859,7 @@ var _ = Describe("Cluster Controller", func() { By("Mocking all components' PVCs to bound") for _, comp := range clusterObj.Spec.ComponentSpecs { - mockComponentPVCsBound(&comp, int(comp.Replicas), true) + mockComponentPVCsBound(&comp, int(comp.Replicas), true, storageClassName) } hscalePolicy := func(comp appsv1alpha1.ClusterComponentSpec) *appsv1alpha1.HorizontalScalePolicy { @@ -871,7 +877,7 @@ var _ = Describe("Cluster Controller", func() { lorry.SetMockClient(&mockLorryClient{replicas: updatedReplicas, clusterKey: clusterKey, compName: comp.Name}, nil) By(fmt.Sprintf("H-scale component %s with policy %s", comp.Name, hscalePolicy(comp))) - horizontalScaleComp(updatedReplicas, &clusterObj.Spec.ComponentSpecs[i], hscalePolicy(comp)) + horizontalScaleComp(updatedReplicas, &clusterObj.Spec.ComponentSpecs[i], storageClassName, hscalePolicy(comp)) } By("Checking cluster status and the number of replicas changed") @@ -896,15 +902,14 @@ var _ = Describe("Cluster Controller", func() { waitForCreatingResourceCompletely(clusterKey, compName) // REVIEW: this test flow, wait for running phase? 
- viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) viper.Set(constant.CfgKeyBackupPVCName, "") - horizontalScale(int(updatedReplicas), dataClonePolicy, compDefName) + horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, dataClonePolicy, compDefName) } - testVolumeExpansion := func(compName, compDefName string) { + testVolumeExpansion := func(compName, compDefName string, storageClass *storagev1.StorageClass) { var ( - storageClassName = "sc-mock" replicas = 3 volumeSize = "1Gi" newVolumeSize = "2Gi" @@ -913,15 +918,7 @@ var _ = Describe("Cluster Controller", func() { ) By("Mock a StorageClass which allows resize") - allowVolumeExpansion := true - storageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: storageClassName, - }, - Provisioner: "kubernetes.io/no-provisioner", - AllowVolumeExpansion: &allowVolumeExpansion, - } - Expect(testCtx.CreateObj(testCtx.Ctx, storageClass)).Should(Succeed()) + Expect(*storageClass.AllowVolumeExpansion).Should(BeTrue()) By("Creating a cluster with VolumeClaimTemplate") pvcSpec := testapps.NewPVCSpec(volumeSize) @@ -1591,7 +1588,7 @@ var _ = Describe("Cluster Controller", func() { testBackupError := func(compName, compDefName string) { initialReplicas := int32(1) updatedReplicas := int32(3) - viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) By("Set HorizontalScalePolicy") Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), @@ -1620,6 +1617,11 @@ var _ = Describe("Cluster Controller", func() { waitForCreatingResourceCompletely(clusterKey, compName) Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(1)) + By("Create and Mock PVCs status to bound") + for _, comp := range clusterObj.Spec.ComponentSpecs { + mockComponentPVCsBound(&comp, int(comp.Replicas), true, testk8s.DefaultStorageClassName) + 
} + By(fmt.Sprintf("Changing replicas to %d", updatedReplicas)) changeCompReplicas(clusterKey, updatedReplicas, &clusterObj.Spec.ComponentSpecs[0]) Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(2)) @@ -1645,7 +1647,7 @@ var _ = Describe("Cluster Controller", func() { By("Checking cluster status failed with backup error") Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, cluster *appsv1alpha1.Cluster) { - g.Expect(viper.GetBool("VOLUMESNAPSHOT")).Should(BeTrue()) + g.Expect(testk8s.IsMockVolumeSnapshotEnabled(&testCtx, testk8s.DefaultStorageClassName)).Should(BeTrue()) g.Expect(cluster.Status.Conditions).ShouldNot(BeEmpty()) var err error for _, cond := range cluster.Status.Conditions { @@ -1919,7 +1921,7 @@ var _ = Describe("Cluster Controller", func() { // statefulCompDefName not in componentDefsWithHScalePolicy, for nil backup policy test // REVIEW: // 1. this test flow, wait for running phase? - horizontalScale(int(updatedReplicas), policyType, consensusCompDefName, replicationCompDefName) + horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, policyType, consensusCompDefName, replicationCompDefName) } It("should create all sub-resources successfully, with terminationPolicy=Halt lifecycle", func() { @@ -1946,7 +1948,7 @@ var _ = Describe("Cluster Controller", func() { Namespace: clusterKey.Namespace, Name: getPVCName(testapps.DataVolumeName, compName, i), } - createPVC(clusterKey.Name, pvcKey.Name, compName) + createPVC(clusterKey.Name, pvcKey.Name, compName, "") Eventually(testapps.CheckObjExists(&testCtx, pvcKey, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) Expect(testapps.GetAndChangeObjStatus(&testCtx, pvcKey, func(pvc *corev1.PersistentVolumeClaim) { pvc.Status.Phase = corev1.ClaimBound @@ -2028,13 +2030,13 @@ var _ = Describe("Cluster Controller", func() { }) It("should successfully h-scale with multiple components", func() { - viper.Set("VOLUMESNAPSHOT", true) + 
testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) viper.Set(constant.CfgKeyBackupPVCName, "") testMultiCompHScale(appsv1alpha1.HScaleDataClonePolicyCloneVolume) }) It("should successfully h-scale with multiple components by backup tool", func() { - viper.Set("VOLUMESNAPSHOT", false) + testk8s.MockDisableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) viper.Set(constant.CfgKeyBackupPVCName, "test-backup-pvc") testMultiCompHScale(appsv1alpha1.HScaleDataClonePolicyCloneVolume) }) @@ -2255,6 +2257,10 @@ var _ = Describe("Cluster Controller", func() { }) When("creating cluster with stateful workloadTypes (being Stateful|Consensus|Replication) component", func() { + var ( + mockStorageClass *storagev1.StorageClass + ) + compNameNDef := map[string]string{ statefulCompName: statefulCompDefName, consensusCompName: consensusCompDefName, @@ -2264,12 +2270,13 @@ var _ = Describe("Cluster Controller", func() { BeforeEach(func() { createAllWorkloadTypesClusterDef() createBackupPolicyTpl(clusterDefObj) + mockStorageClass = testk8s.CreateMockStorageClass(&testCtx, testk8s.DefaultStorageClassName) }) for compName, compDefName := range compNameNDef { Context(fmt.Sprintf("[comp: %s] volume expansion", compName), func() { It("should update PVC request storage size accordingly", func() { - testVolumeExpansion(compName, compDefName) + testVolumeExpansion(compName, compDefName, mockStorageClass) }) It("should be able to recover if volume expansion fails", func() { @@ -2305,15 +2312,15 @@ var _ = Describe("Cluster Controller", func() { Context(fmt.Sprintf("[comp: %s] scale-out after volume expansion", compName), func() { It("scale-out with data clone policy", func() { - testVolumeExpansion(compName, compDefName) - viper.Set("VOLUMESNAPSHOT", true) + testVolumeExpansion(compName, compDefName, mockStorageClass) + testk8s.MockEnableVolumeSnapshot(&testCtx, mockStorageClass.Name) viper.Set(constant.CfgKeyBackupPVCName, "") - horizontalScale(5, 
appsv1alpha1.HScaleDataClonePolicyCloneVolume, compDefName) + horizontalScale(5, mockStorageClass.Name, appsv1alpha1.HScaleDataClonePolicyCloneVolume, compDefName) }) It("scale-out without data clone policy", func() { - testVolumeExpansion(compName, compDefName) - horizontalScale(5, "", compDefName) + testVolumeExpansion(compName, compDefName, mockStorageClass) + horizontalScale(5, mockStorageClass.Name, "", compDefName) }) }) } diff --git a/controllers/apps/components/hscale_volume_populator.go b/controllers/apps/components/hscale_volume_populator.go index cecb572ec86..2fb3446284c 100644 --- a/controllers/apps/components/hscale_volume_populator.go +++ b/controllers/apps/components/hscale_volume_populator.go @@ -20,12 +20,14 @@ along with this program. If not, see . package components import ( + "context" "fmt" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,7 +39,6 @@ import ( "github.com/apecloud/kubeblocks/internal/controller/factory" "github.com/apecloud/kubeblocks/internal/controller/plan" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper "github.com/apecloud/kubeblocks/internal/viperx" ) type dataClone interface { @@ -88,7 +89,11 @@ func newDataClone(reqCtx intctrlutil.RequestCtx, }, nil } if component.HorizontalScalePolicy.Type == appsv1alpha1.HScaleDataClonePolicyCloneVolume { - if viper.GetBool("VOLUMESNAPSHOT") { + volumeSnapshotEnabled, err := isVolumeSnapshotEnabled(reqCtx.Ctx, cli, stsObj, backupVCT(component)) + if err != nil { + return nil, err + } + if volumeSnapshotEnabled { return &snapshotDataClone{ baseDataClone{ reqCtx: reqCtx, @@ -224,17 +229,7 @@ func (d *baseDataClone) allVCTs() []*corev1.PersistentVolumeClaimTemplate { } func (d 
*baseDataClone) backupVCT() *corev1.PersistentVolumeClaimTemplate { - // TODO: is it possible that component.VolumeClaimTemplates may be empty? - vct := d.component.VolumeClaimTemplates[0] - for _, tmpVct := range d.component.VolumeClaimTemplates { - for _, volumeType := range d.component.VolumeTypes { - if volumeType.Type == appsv1alpha1.VolumeTypeData && volumeType.Name == tmpVct.Name { - vct = tmpVct - break - } - } - } - return &vct + return backupVCT(d.component) } func (d *baseDataClone) excludeBackupVCTs() []*corev1.PersistentVolumeClaimTemplate { @@ -456,7 +451,7 @@ func (d *snapshotDataClone) checkedCreatePVCFromSnapshot(pvcKey types.Namespaced return nil, err } if len(vsList.Items) == 0 { - return nil, fmt.Errorf("volumesnapshot not found in cluster %s component %s", d.cluster.Name, d.component.Name) + return nil, fmt.Errorf("volumesnapshot not found for cluster %s component %s", d.cluster.Name, d.component.Name) } // exclude volumes that are deleting vsName := "" @@ -671,3 +666,53 @@ func getBackupPolicyFromTemplate(reqCtx intctrlutil.RequestCtx, } return nil, nil } + +func backupVCT(component *component.SynthesizedComponent) *corev1.PersistentVolumeClaimTemplate { + if len(component.VolumeClaimTemplates) == 0 { + return nil + } + vct := component.VolumeClaimTemplates[0] + for _, tmpVct := range component.VolumeClaimTemplates { + for _, volumeType := range component.VolumeTypes { + if volumeType.Type == appsv1alpha1.VolumeTypeData && volumeType.Name == tmpVct.Name { + vct = tmpVct + break + } + } + } + return &vct +} + +func isVolumeSnapshotEnabled(ctx context.Context, cli client.Client, + sts *appsv1.StatefulSet, vct *corev1.PersistentVolumeClaimTemplate) (bool, error) { + if sts == nil || vct == nil { + return false, nil + } + pvcKey := types.NamespacedName{ + Namespace: sts.Namespace, + Name: fmt.Sprintf("%s-%s-%d", vct.Name, sts.Name, 0), + } + pvc := corev1.PersistentVolumeClaim{} + if err := cli.Get(ctx, pvcKey, &pvc); err != nil { + return false, 
client.IgnoreNotFound(err) + } + if pvc.Spec.StorageClassName == nil { + return false, nil + } + + storageClass := storagev1.StorageClass{} + if err := cli.Get(ctx, types.NamespacedName{Name: *pvc.Spec.StorageClassName}, &storageClass); err != nil { + return false, client.IgnoreNotFound(err) + } + + vscList := snapshotv1.VolumeSnapshotClassList{} + if err := cli.List(ctx, &vscList); err != nil { + return false, err + } + for _, vsc := range vscList.Items { + if vsc.Driver == storageClass.Provisioner { + return true, nil + } + } + return false, nil +} diff --git a/controllers/apps/opsrequest_controller_test.go b/controllers/apps/opsrequest_controller_test.go index 8e7f52ac22c..7f5f3faf18a 100644 --- a/controllers/apps/opsrequest_controller_test.go +++ b/controllers/apps/opsrequest_controller_test.go @@ -44,7 +44,6 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" - viper "github.com/apecloud/kubeblocks/internal/viperx" lorry "github.com/apecloud/kubeblocks/lorry/client" ) @@ -322,7 +321,7 @@ var _ = Describe("OpsRequest Controller", func() { Context("with Cluster which has MySQL ConsensusSet", func() { BeforeEach(func() { By("Create a clusterDefinition obj") - viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) clusterDefObj = testapps.NewClusterDefFactory(clusterDefName). AddComponentDef(testapps.ConsensusMySQLComponent, mysqlCompDefName). 
AddHorizontalScalePolicy(appsv1alpha1.HorizontalScalePolicy{ @@ -364,7 +363,7 @@ var _ = Describe("OpsRequest Controller", func() { createBackupPolicyTpl(clusterDefObj) By("set component to horizontal with snapshot policy and create a cluster") - viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) if clusterDefObj.Spec.ComponentDefs[0].HorizontalScalePolicy == nil { Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(clusterDef *appsv1alpha1.ClusterDefinition) { @@ -388,7 +387,11 @@ var _ = Describe("OpsRequest Controller", func() { for i := 0; i < int(replicas); i++ { pvcName := fmt.Sprintf("%s-%s-%s-%d", testapps.DataVolumeName, clusterKey.Name, mysqlCompName, i) pvc := testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterKey.Name, - mysqlCompName, testapps.DataVolumeName).SetStorage("1Gi").Create(&testCtx).GetObject() + mysqlCompName, testapps.DataVolumeName). + SetStorage("1Gi"). + SetStorageClass(testk8s.DefaultStorageClassName). + Create(&testCtx). 
+ GetObject() // mock pvc bound Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(pvc), func(pvc *corev1.PersistentVolumeClaim) { pvc.Status.Phase = corev1.ClaimBound @@ -429,7 +432,7 @@ var _ = Describe("OpsRequest Controller", func() { It("HorizontalScaling when not support snapshot", func() { By("init backup policy template, mysql cluster and hscale ops") - viper.Set("VOLUMESNAPSHOT", false) + testk8s.MockDisableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) createMysqlCluster(3) cluster := &appsv1alpha1.Cluster{} @@ -471,7 +474,7 @@ var _ = Describe("OpsRequest Controller", func() { It("HorizontalScaling via volume snapshot backup", func() { By("init backup policy template, mysql cluster and hscale ops") - viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) createMysqlCluster(3) replicas := int32(5) @@ -624,7 +627,7 @@ var _ = Describe("OpsRequest Controller", func() { It("delete Running opsRequest", func() { By("Create a horizontalScaling ops") - viper.Set("VOLUMESNAPSHOT", true) + testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) createMysqlCluster(3) ops := createClusterHscaleOps(5) opsKey := client.ObjectKeyFromObject(ops) @@ -652,7 +655,7 @@ var _ = Describe("OpsRequest Controller", func() { It("cancel HorizontalScaling opsRequest which is Running", func() { By("create cluster and mock it to running") - viper.Set("VOLUMESNAPSHOT", false) + testk8s.MockDisableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) oldReplicas := int32(3) createMysqlCluster(oldReplicas) mockCompRunning(oldReplicas) diff --git a/internal/testutil/k8s/storage_util.go b/internal/testutil/k8s/storage_util.go index 77f0fb01343..53b01248b7a 100644 --- a/internal/testutil/k8s/storage_util.go +++ b/internal/testutil/k8s/storage_util.go @@ -20,13 +20,23 @@ along with this program. If not, see . 
package testutil import ( + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/onsi/gomega" storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubectl/pkg/util/storage" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/apecloud/kubeblocks/internal/testutil" ) +var ( + DefaultStorageClassName string = "default-sc-for-testing" + defaultVolumeSnapshotClassName string = "default-vsc-for-testing" + defaultProvisioner string = "testing.kubeblocks.io" +) + func GetDefaultStorageClass(testCtx *testutil.TestContext) *storagev1.StorageClass { scList := &storagev1.StorageClassList{} gomega.Expect(testCtx.Cli.List(testCtx.Ctx, scList)).Should(gomega.Succeed()) @@ -49,3 +59,100 @@ func GetDefaultStorageClass(testCtx *testutil.TestContext) *storagev1.StorageCla func isDefaultStorageClassAnnotation(storageClass *storagev1.StorageClass) bool { return storageClass.Annotations != nil && storageClass.Annotations[storage.IsDefaultStorageClassAnnotation] == "true" } + +func CreateMockStorageClass(testCtx *testutil.TestContext, storageClassName string) *storagev1.StorageClass { + sc := getStorageClass(testCtx, storageClassName) + if sc == nil { + sc = createStorageClass(testCtx, storageClassName) + } + return sc +} + +func MockEnableVolumeSnapshot(testCtx *testutil.TestContext, storageClassName string) { + sc := getStorageClass(testCtx, storageClassName) + if sc == nil { + sc = createStorageClass(testCtx, storageClassName) + } + vsc := getVolumeSnapshotClass(testCtx, sc) + if vsc == nil { + createVolumeSnapshotClass(testCtx, sc) + } + gomega.Expect(IsMockVolumeSnapshotEnabled(testCtx, storageClassName)).Should(gomega.BeTrue()) +} + +func MockDisableVolumeSnapshot(testCtx *testutil.TestContext, storageClassName string) { + sc := getStorageClass(testCtx, storageClassName) + if sc != nil { + deleteVolumeSnapshotClass(testCtx, sc) + 
deleteStorageClass(testCtx, storageClassName) + } +} + +func IsMockVolumeSnapshotEnabled(testCtx *testutil.TestContext, storageClassName string) bool { + sc := getStorageClass(testCtx, storageClassName) + if sc == nil { + return false + } + return getVolumeSnapshotClass(testCtx, sc) != nil +} + +func getStorageClass(testCtx *testutil.TestContext, storageClassName string) *storagev1.StorageClass { + sc := &storagev1.StorageClass{} + if err := testCtx.Cli.Get(testCtx.Ctx, types.NamespacedName{Name: storageClassName}, sc); err != nil { + if client.IgnoreNotFound(err) == nil { + return nil + } + gomega.Expect(err).Should(gomega.Succeed()) + } + return sc +} + +func createStorageClass(testCtx *testutil.TestContext, storageClassName string) *storagev1.StorageClass { + allowVolumeExpansion := true + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + Provisioner: defaultProvisioner, + AllowVolumeExpansion: &allowVolumeExpansion, + } + gomega.Expect(testCtx.Cli.Create(testCtx.Ctx, sc)).Should(gomega.Succeed()) + return sc +} + +func deleteStorageClass(testCtx *testutil.TestContext, storageClassName string) { + sc := getStorageClass(testCtx, storageClassName) + if sc != nil { + gomega.Expect(testCtx.Cli.Delete(testCtx.Ctx, sc)).Should(gomega.Succeed()) + } +} + +func getVolumeSnapshotClass(testCtx *testutil.TestContext, storageClass *storagev1.StorageClass) *snapshotv1.VolumeSnapshotClass { + vscList := &snapshotv1.VolumeSnapshotClassList{} + gomega.Expect(testCtx.Cli.List(testCtx.Ctx, vscList)).Should(gomega.Succeed()) + for i, vsc := range vscList.Items { + if vsc.Driver == storageClass.Provisioner { + return &vscList.Items[i] + } + } + return nil +} + +func createVolumeSnapshotClass(testCtx *testutil.TestContext, storageClass *storagev1.StorageClass) *snapshotv1.VolumeSnapshotClass { + vsc := &snapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultVolumeSnapshotClassName, + }, + Driver: 
storageClass.Provisioner, + DeletionPolicy: snapshotv1.VolumeSnapshotContentDelete, + } + gomega.Expect(testCtx.Cli.Create(testCtx.Ctx, vsc)).Should(gomega.Succeed()) + return vsc +} + +func deleteVolumeSnapshotClass(testCtx *testutil.TestContext, storageClass *storagev1.StorageClass) { + vsc := getVolumeSnapshotClass(testCtx, storageClass) + if vsc != nil { + gomega.Expect(testCtx.Cli.Delete(testCtx.Ctx, vsc)).Should(gomega.Succeed()) + } +} From 375d79e01876bdc0fc60c7de5681225266383e54 Mon Sep 17 00:00:00 2001 From: Vettal <92501707+vettalwu@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:23:07 +0800 Subject: [PATCH 24/58] feat: Integrate PolarDB-X into KubeBlocks. (#5245) --- deploy/polardbx-cluster/.helmignore | 24 + deploy/polardbx-cluster/Chart.yaml | 19 + deploy/polardbx-cluster/templates/NOTES.txt | 14 + .../polardbx-cluster/templates/_helpers.tpl | 62 + .../polardbx-cluster/templates/cluster.yaml | 99 + deploy/polardbx-cluster/values.yaml | 110 + deploy/polardbx/.helmignore | 23 + deploy/polardbx/Chart.yaml | 19 + .../dashboards/polardbx-overview.json | 4626 +++++++++++++++++ deploy/polardbx/scripts/gms-init.sql | 153 + deploy/polardbx/scripts/gms-metadata.tpl | 23 + deploy/polardbx/scripts/metadb-setup.tpl | 37 + deploy/polardbx/scripts/xstore-post-start.tpl | 24 + deploy/polardbx/scripts/xstore-setup.tpl | 17 + deploy/polardbx/templates/NOTES.txt | 14 + deploy/polardbx/templates/_helpers.tpl | 62 + .../polardbx/templates/clusterDefintion.yaml | 694 +++ deploy/polardbx/templates/clusterVersion.yaml | 64 + .../templates/configmap-dashboards.yaml | 19 + .../polardbx/templates/scriptstemplate.yaml | 17 + deploy/polardbx/values.yaml | 61 + 21 files changed, 6181 insertions(+) create mode 100644 deploy/polardbx-cluster/.helmignore create mode 100644 deploy/polardbx-cluster/Chart.yaml create mode 100644 deploy/polardbx-cluster/templates/NOTES.txt create mode 100644 deploy/polardbx-cluster/templates/_helpers.tpl create mode 100644 
deploy/polardbx-cluster/templates/cluster.yaml create mode 100644 deploy/polardbx-cluster/values.yaml create mode 100644 deploy/polardbx/.helmignore create mode 100644 deploy/polardbx/Chart.yaml create mode 100644 deploy/polardbx/dashboards/polardbx-overview.json create mode 100644 deploy/polardbx/scripts/gms-init.sql create mode 100644 deploy/polardbx/scripts/gms-metadata.tpl create mode 100644 deploy/polardbx/scripts/metadb-setup.tpl create mode 100644 deploy/polardbx/scripts/xstore-post-start.tpl create mode 100644 deploy/polardbx/scripts/xstore-setup.tpl create mode 100644 deploy/polardbx/templates/NOTES.txt create mode 100644 deploy/polardbx/templates/_helpers.tpl create mode 100644 deploy/polardbx/templates/clusterDefintion.yaml create mode 100644 deploy/polardbx/templates/clusterVersion.yaml create mode 100644 deploy/polardbx/templates/configmap-dashboards.yaml create mode 100644 deploy/polardbx/templates/scriptstemplate.yaml create mode 100644 deploy/polardbx/values.yaml diff --git a/deploy/polardbx-cluster/.helmignore b/deploy/polardbx-cluster/.helmignore new file mode 100644 index 00000000000..a2391b0ad18 --- /dev/null +++ b/deploy/polardbx-cluster/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +*.lock diff --git a/deploy/polardbx-cluster/Chart.yaml b/deploy/polardbx-cluster/Chart.yaml new file mode 100644 index 00000000000..0b732a67078 --- /dev/null +++ b/deploy/polardbx-cluster/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: polardbx-cluster +description: PolarDB-X Cluster Helm Chart for KubeBlocks. 
+ +type: application +version: 0.1.0 +appVersion: v1.4.1 + +keywords: +- polardbx +- database +- distributed +- cloud-native + +home: https://polardbx.com/home + +maintainers: +- name: Vettal Wu + email: vettal.wd@alibaba-inc.com diff --git a/deploy/polardbx-cluster/templates/NOTES.txt b/deploy/polardbx-cluster/templates/NOTES.txt new file mode 100644 index 00000000000..3b27cadf8f2 --- /dev/null +++ b/deploy/polardbx-cluster/templates/NOTES.txt @@ -0,0 +1,14 @@ +Thanks for installing PolarDB-X using KubeBlocks! + +1. Run the following command to create your first PolarDB-X cluster: + +``` +kbcli cluster create pxc --cluster-definition polardbx +``` + +2. Port-forward service to localhost and connect to PolarDB-X cluster: + +``` +kubectl port-forward svc/pxc-cn 3306:3306 +mysql -h127.0.0.1 -upolardbx_root +``` \ No newline at end of file diff --git a/deploy/polardbx-cluster/templates/_helpers.tpl b/deploy/polardbx-cluster/templates/_helpers.tpl new file mode 100644 index 00000000000..d23cd372e75 --- /dev/null +++ b/deploy/polardbx-cluster/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "polardbx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "polardbx.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "polardbx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "polardbx.labels" -}} +helm.sh/chart: {{ include "polardbx.chart" . }} +{{ include "polardbx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "polardbx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "polardbx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "polardbx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "polardbx.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/deploy/polardbx-cluster/templates/cluster.yaml b/deploy/polardbx-cluster/templates/cluster.yaml new file mode 100644 index 00000000000..f841eef16f8 --- /dev/null +++ b/deploy/polardbx-cluster/templates/cluster.yaml @@ -0,0 +1,99 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: {{ include "polardbx.name" . }} + labels: + {{ include "polardbx.labels" . 
| nindent 4 }} +spec: + clusterDefinitionRef: polardbx + clusterVersionRef: polardbx-{{ default .Chart.AppVersion .Values.clusterVersionOverride }} + terminationPolicy: {{ .Values.polardbx.terminationPolicy }} + componentSpecs: + - componentDefRef: gms + name: gms + replicas: {{ .Values.gms.replicas }} + {{- with .Values.gms.resources }} + resources: + {{- with .limits }} + limits: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- with .requests }} + requests: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- end }} + {{- if .Values.gms.persistence.enabled }} + volumeClaimTemplates: + - name: data # ref clusterdefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.gms.persistence.data.size }} + {{- end }} + {{- $i := 0 }} + {{- range .Values.dn }} + - componentDefRef: dn + name: dn-{{ $i }} + replicas: {{ .replicas }} + {{- with .resources }} + resources: + {{- with .limits }} + limits: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- with .requests }} + requests: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- end }} + {{- if .persistence.enabled }} + volumeClaimTemplates: + - name: data # ref clusterdefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .persistence.data.size }} + {{- end }} + {{- end }} + {{- $i = add1 $i }} + - componentDefRef: cn + name: cn + replicas: {{ .Values.cn.replicas }} + {{- with .Values.cn.resources }} + resources: + {{- with .limits }} + limits: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- with .requests }} + requests: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- end }} + - componentDefRef: cdc + name: cdc + replicas: {{ .Values.cdc.replicas }} + {{- with .Values.cn.resources }} + resources: + {{- with 
.limits }} + limits: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- with .requests }} + requests: + cpu: {{ .cpu | quote }} + memory: {{ .memory | quote }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/deploy/polardbx-cluster/values.yaml b/deploy/polardbx-cluster/values.yaml new file mode 100644 index 00000000000..f1225cfbf9e --- /dev/null +++ b/deploy/polardbx-cluster/values.yaml @@ -0,0 +1,110 @@ +## cluster settings for polardbx cluster +nameOverride: pxc +polardbx: + ## @param polardbx.terminationPolicy, temination policy for polardbx cluster + terminationPolicy: WipeOut + +gms: + ## @param gms.replicas data replicas of gms instance + ## Default value is 3, which means a paxos group: leader, follower, follower + replicas: 3 + + ## @param gms.resources + ## resource management for gms component + ## more info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + + + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param shard[*].persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: true + ## `data` volume settings + ## + data: + ## @param shard[*].persistence.data.storageClassName Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClassName: + ## @param shard[*].persistence.size Size of data volume + ## + size: 20Gi + +dn: + - + ## @param dn[*].replicas data replicas of each DN instance + ## Default value is 3, which means a paxos group: leader, follower, follower + replicas: 3 + ## @param dn[*].resources + ## resource management for dn component + ## more info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param shard[*].persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: true + ## `data` volume settings + ## + data: + ## @param shard[*].persistence.data.storageClassName Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClassName: + ## @param shard[*].persistence.size Size of data volume + ## + size: 20Gi + +cn: + ## @param cn.replicas number of polardb-x cn nodes + replicas: 2 + ## @param cn.resources + ## resource management for cn component + ## more info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + +cdc: + ## @param cdc.replicas number of polardb-x cdc nodes + replicas: 2 + ## @param cdc.resources + ## resource management for cdc component + ## more info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" \ No newline at end of file diff --git a/deploy/polardbx/.helmignore b/deploy/polardbx/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/deploy/polardbx/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/polardbx/Chart.yaml b/deploy/polardbx/Chart.yaml new file mode 100644 index 00000000000..914355e1f54 --- /dev/null +++ b/deploy/polardbx/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: polardbx-cluster +description: PolarDB-X Cluster Helm Chart for KubeBlocks. 
+ +type: application +version: 0.1.0 +appVersion: v1.4.1 + +keywords: + - polardbx + - database + - distributed + - cloud-native + +home: https://polardbx.com/home + +maintainers: + - name: Vettal Wu + email: vettal.wd@alibaba-inc.com \ No newline at end of file diff --git a/deploy/polardbx/dashboards/polardbx-overview.json b/deploy/polardbx/dashboards/polardbx-overview.json new file mode 100644 index 00000000000..fac3adf8099 --- /dev/null +++ b/deploy/polardbx/dashboards/polardbx-overview.json @@ -0,0 +1,4626 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 0, + "y": 1 + }, + "id": 15, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + 
"pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(irate(polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "instant": true, + "interval": "", + "legendFormat": "Logical", + "queryType": "randomWalk", + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(irate(polardbx_stats_physical_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "hide": true, + "instant": true, + "interval": "", + "legendFormat": "Physical", + "refId": "B" + } + ], + "title": "QPS (Logical)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 7, + "y": 1 + }, + "id": 10, + "options": { + "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(irate(polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "instant": true, + "interval": "", + "legendFormat": "QPS (Logical)", + "queryType": "randomWalk", + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"sum(irate(polardbx_stats_physical_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "QPS (Physical)", + "refId": "B" + } + ], + "title": "QPS", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 40, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_best_effort_transaction_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) +\n\nsum(rate(polardbx_stats_xa_transaction_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) +\n\nsum(rate(polardbx_stats_tso_transaction_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) ", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Errors", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "TPS", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] 
+ }, + "gridPos": { + "h": 4, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 14, + "options": { + "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(polardbx_stats_active_connections{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})", + "instant": true, + "interval": "", + "legendFormat": "Connections", + "queryType": "randomWalk", + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(polardbx_stats_running_count{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Threads (Running)", + "refId": "B" + } + ], + "title": "Connections/Threads", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-yellow", + "value": 20 + }, + { + "color": "dark-orange", + "value": 50 + }, + { + "color": "dark-red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 0, + "y": 5 + }, + "id": 16, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + 
"exemplar": true, + "expr": "sum(delta(polardbx_stats_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", + "instant": true, + "interval": "", + "legendFormat": "Logical", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Response Time (Logical)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "light-yellow", + "value": 20 + }, + { + "color": "dark-orange", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 7, + "y": 5 + }, + "id": 12, + "options": { + "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(delta(polardbx_stats_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", + "instant": true, + "interval": "", + "legendFormat": "RT (Logical)", + "queryType": "randomWalk", + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"sum(delta(polardbx_stats_physical_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_physical_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "RT (Physical)", + "refId": "B" + } + ], + "title": "Response Time", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 15 + }, + { + "color": "red", + "value": 30 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 5, + "x": 14, + "y": 5 + }, + "id": 18, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_error_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Errors", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Errors", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "align": "center", + "displayMode": "auto", + "filterable": false, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + 
} + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "CPU %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "gradient-gauge" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "MEM %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "gradient-gauge" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "MEM" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.width", + "value": 162 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "MEM (Limit)" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Pod" + }, + "properties": [ + { + "id": "custom.filterable", + "value": true + }, + { + "id": "custom.width", + "value": 215 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Network (Recv)" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.displayMode", + "value": "auto" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Network (Sent)" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "node" + }, + "properties": [ + { + "id": "custom.width", + "value": 205 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Host" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Node Dashboard", + "url": "./d/fa49a4706d07a042595b664c87fb33ea/nodes?orgId=1&var-datasource=$datasource&var-instance=${__value.text}" + } + ] + }, + { + "id": "custom.width", + "value": 221 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Pod" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Pod Dashboard", + "url": 
"./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=${__value.text}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU" + }, + "properties": [ + { + "id": "custom.width", + "value": 92 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU (Limit)" + }, + "properties": [ + { + "id": "custom.width", + "value": 107 + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 96, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 0, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "CPU" + } + ] + }, + "pluginVersion": "9.2.4", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (app_kubernetes_io_instance, pod, app_kubernetes_io_component, apps_kubeblocks_io_component_name) (mysql_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", app_kubernetes_io_component=\"gms\"})\nor\nsum by (app_kubernetes_io_instance, pod, app_kubernetes_io_component, apps_kubeblocks_io_component_name) (polardbx_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})\nor \nsum by (app_kubernetes_io_instance, pod, app_kubernetes_io_component, apps_kubeblocks_io_component_name) (mysql_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", app_kubernetes_io_component=\"dn\"})\nor \nsum by (app_kubernetes_io_instance, pod, app_kubernetes_io_component, apps_kubeblocks_io_component_name) (polardbx_cdc_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})\n", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "{{ pod }}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Topology", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "pod" 
+ } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Time 1": true, + "Time 2": true, + "Time 3": true, + "Time 4": true, + "Time 5": true, + "Time 6": true, + "Time 7": true, + "Time 8": true, + "Time 9": true, + "Value": true, + "Value #A": true + }, + "indexByName": { + "Time": 2, + "Value": 5, + "app_kubernetes_io_component": 1, + "app_kubernetes_io_instance": 0, + "apps_kubeblocks_io_component_name": 3, + "pod": 4 + }, + "renameByName": { + "Value #B": "CPU", + "Value #C": "CPU (Limit)", + "Value #D": "CPU %", + "Value #E": "MEM", + "Value #F": "MEM (Limit)", + "Value #G": "MEM %", + "Value #H": "Network (Recv)", + "Value #I": "Network (Sent)", + "app_kubernetes_io_component": "role", + "app_kubernetes_io_instance": "instance", + "apps_kubeblocks_io_component_name": "component_name", + "node": "Host", + "pod": "pod" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Pod" + } + ] + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 42, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], + "title": "Global Meta Service", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": 
"never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 20 + }, + "id": 86, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_queries{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "qps (physical)", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "QPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 
8, + "w": 6, + "x": 6, + "y": 20 + }, + "id": 87, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_threads_connected{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "connections", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_threads_running{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"})", + "hide": false, + "interval": "", + "legendFormat": "threads", + "range": true, + "refId": "B" + } + ], + "title": "Connections/Threads", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 20 + }, + "id": 88, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_innodb_buffer_pool_bytes_data{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "buffer pool size", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Buffer Pool Size (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 20 + }, + "id": 89, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, 
+ "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_innodb_buffer_pool_bytes_data{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}) by (xstore_name)", + "interval": "", + "legendFormat": "{{ xstore_name }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Buffer Pool Size (GMS)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 28 + }, + "id": 90, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_buffer_pool_dirty_pages{app_kubernetes_io_instance=\"$polardbx\", 
app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "dirty", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_buffer_pool_page_changes_total{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[10m]))", + "hide": false, + "interval": "", + "legendFormat": "flush", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Pool Dirty/Flush (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 28 + }, + "id": 91, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + 
"exemplar": true, + "expr": "sum(mysql_global_status_innodb_row_lock_current_waits{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "current wait", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Row Lock Current Wait (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 28 + }, + "id": 92, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_buffer_pool_read_requests{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "reads", + "queryType": "randomWalk", + "range": 
true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_buffer_pool_write_requests{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "writes", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } + ], + "title": "Logical Reads/Writes (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 28 + }, + "id": 93, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_log_writes{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", 
namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "log writes", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_reads{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data reads", + "queryType": "randomWalk", + "range": true, + "refId": "B" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_fsyncs{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data fsyncs", + "queryType": "randomWalk", + "refId": "C" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_os_log_fsyncs{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "log fsyncs", + "queryType": "randomWalk", + "refId": "D" + } + ], + "title": "IOPS (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + 
"stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 36 + }, + "id": 94, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_written{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "data written/s", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_read{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data read/s", + "queryType": "randomWalk", + "range": true, + "refId": "B" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_os_log_written{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "log written/s", + "queryType": "randomWalk", + "refId": "C" + } + ], + "title": "IO Throughput (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 36 + }, + "id": 95, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_bytes_received{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "received/s", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_bytes_sent{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"gms\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "sent/s", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } + ], + "title": "Network (Total)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, 
+ "y": 44 + }, + "id": 4, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], + "title": "Compute Node", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 45 + }, + "id": 28, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "qps (logical)", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_physical_request_count_total{app_kubernetes_io_instance=\"$polardbx\", 
namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "qps (physical)", + "range": true, + "refId": "B" + } + ], + "title": "QPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 45 + }, + "id": 29, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(delta(polardbx_stats_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", + "interval": "", + "legendFormat": "rt (logical)", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + 
"exemplar": true, + "expr": "sum(delta(polardbx_stats_physical_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[30s])) / (sum(delta (polardbx_stats_physical_request_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[30s])) + 1) / 1000", + "hide": false, + "interval": "", + "legendFormat": "rt (physical)", + "range": true, + "refId": "B" + } + ], + "title": "Response Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 45 + }, + "id": 30, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(polardbx_stats_active_connections{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "connections", + "queryType": 
"randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(polardbx_stats_running_count{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"})", + "hide": false, + "interval": "", + "legendFormat": "threads (running)", + "range": true, + "refId": "B" + } + ], + "title": "Connections / Threads", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "A" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.axisLabel", + "value": "count" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.fillOpacity", + "value": 0 + }, + { + "id": "custom.gradientMode", + "value": "none" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "left" + }, + { + "id": "unit", + "value": "ns" + }, + { + "id": "custom.axisLabel", + "value": "time" + }, + { + "id": "custom.fillOpacity", + "value": 0 + }, 
+ { + "id": "custom.gradientMode", + "value": "none" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 45 + }, + "id": 31, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(idelta(polardbx_jvmstats_gc_collector_invocation_count{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m])) by (type)", + "instant": false, + "interval": "", + "legendFormat": "count - {{ type }}", + "queryType": "randomWalk", + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(idelta(polardbx_jvmstats_gc_collector_time_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]) / (idelta(polardbx_jvmstats_gc_collector_invocation_count{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]) + 1)) by (type)", + "hide": false, + "interval": "", + "legendFormat": "time - {{ type }}", + "range": true, + "refId": "B" + } + ], + "title": "GC", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "hue", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 53 + }, + "id": 32, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stc_connection_error_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "connection error", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Connection Error", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "hue", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 53 + }, + "id": 39, + "options": { + "graph": {}, + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_best_effort_transaction_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "best effort", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(polardbx_stats_xa_transaction_count_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "xa", + "queryType": "randomWalk", + "range": true, + "refId": "B" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(polardbx_stats_tso_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "tso", + "queryType": "randomWalk", + "refId": "C" + } + ], + "title": "Transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 53 + }, + "id": 33, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "topk(5, sum by (schema) (\n label_replace(\n rate(polardbx_stc_request_time_cost_total{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", appname!~\"__.*__@.*|information_schema@.*|polardbx@.*\"}[1m]),\n \"schema\",\n \"$1\",\n \"appname\",\n \"(.*)@.*\"\n )\n )\n) by (schema)", + "interval": "", + "legendFormat": "{{ schema }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Processing Time (Top 5)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 53 + }, + "id": 34, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "topk(5, sum by (schema) (\n label_replace(\n rate(polardbx_stc_active_physical_connection_count{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", appname!~\"__.*__@.*|information_schema@.*|polardbx@.*\"}[1m]),\n \"schema\",\n \"$1\",\n \"appname\",\n \"(.*)@.*\"\n )\n )\n) by (schema)", + "interval": "", + "legendFormat": "active - {{ schema }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "topk(5, sum by (schema) (\n label_replace(\n rate(polardbx_stc_pooling_physical_connection_count{polardbx_name=\"$polardbx\", namespace=\"$namespace\", appname!~\"__.*__@.*|information_schema@.*|polardbx@.*\"}[1m]),\n \"schema\",\n \"$1\",\n \"appname\",\n \"(.*)@.*\"\n )\n )\n) by (schema)", + "hide": false, + "interval": "", + "legendFormat": "pooling - {{ schema }}", + "refId": "B" + } + ], + "title": "Physical Connection (Top 5)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 61 + }, + "id": 6, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], + "title": "Data Node", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 62 + }, + "id": 55, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_queries{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "qps (physical)", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "QPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 62 + }, + "id": 56, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_threads_connected{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "connections", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_threads_running{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"})", + "hide": false, + "interval": "", + "legendFormat": "threads", + "range": true, + "refId": "B" + } + ], + "title": "Connections/Threads", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 
1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 62 + }, + "id": 57, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_innodb_buffer_pool_bytes_data{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "buffer pool size", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Buffer Pool Size (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": 
"green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 62 + }, + "id": 58, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_innodb_buffer_pool_bytes_data{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}) by (xstore_name)", + "interval": "", + "legendFormat": "{{ xstore_name }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Buffer Pool Size (DN)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 70 + }, + "id": 59, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + 
"tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_buffer_pool_dirty_pages{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "dirty", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_buffer_pool_page_changes_total{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[10m]))", + "hide": false, + "interval": "", + "legendFormat": "flush", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Pool Dirty/Flush (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 70 + }, + "id": 60, + "options": { + "graph": {}, + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(mysql_global_status_innodb_row_lock_current_waits{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"})", + "interval": "", + "legendFormat": "current wait", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Row Lock Current Wait (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 70 + }, + "id": 61, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"sum(rate(mysql_global_status_innodb_buffer_pool_read_requests{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "reads", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_buffer_pool_write_requests{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "writes", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } + ], + "title": "Logical Reads/Writes (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 70 + }, + "id": 62, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": 
[ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_log_writes{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "log writes", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_reads{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data reads", + "queryType": "randomWalk", + "range": true, + "refId": "B" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_fsyncs{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data fsyncs", + "queryType": "randomWalk", + "refId": "C" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_os_log_fsyncs{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "log fsyncs", + "queryType": "randomWalk", + "refId": "D" + } + ], + "title": "IOPS (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + 
"viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 78 + }, + "id": 63, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_written{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "data written/s", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_data_read{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "data read/s", + "queryType": "randomWalk", + "range": true, + "refId": "B" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "exemplar": true, + "expr": "sum(rate(mysql_global_status_innodb_os_log_written{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", xstore_role=\"leader\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "log writtern/s", + "queryType": "randomWalk", + "refId": "C" + } + ], + "title": "IO Throughput (Total)", + "type": "timeseries" 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 78 + }, + "id": 64, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_bytes_received{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "interval": "", + "legendFormat": "received/s", + "queryType": "randomWalk", + "range": true, + "refId": "A" + }, + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_bytes_sent{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "sent/s", + "queryType": "randomWalk", + "range": true, + "refId": "B" + } 
+ ], + "title": "Network (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 78 + }, + "id": 65, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_commands_total{polapp_kubernetes_io_instanceardbx_name=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\", command=~\"xa_.*\"}[1m])) by (command)", + "interval": "", + "legendFormat": "{{ command }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "XA Transactions (Total)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 78 + }, + "id": 66, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(rate(mysql_global_status_commands_total{app_kubernetes_io_instance=\"$polardbx\", app_kubernetes_io_component=\"dn\", namespace=\"$namespace\", command=~\"begin|commit|rollback\"}[1m])) by (command)", + "interval": "", + "legendFormat": "{{ command }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Transactions (Total)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 86 + }, + "id": 68, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], + "title": "CDC Node", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 87 + }, + "id": 76, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "polardbx_cdc_dumper_delay_in_millisecond{namespace=\"$namespace\"}\n* on(namespace,pod)\ngroup_left() polardbx_cdc_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", app_kubernetes_io_component=\"cdc\"}", + "interval": "", + "legendFormat": "{{ pod }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Delay(ms)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": 
"opacity", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 87 + }, + "id": 77, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "datasource": { + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "polardbx_cdc_dumper_jvm_heap_usage{namespace=\"$namespace\"}\n* on(namespace,pod)\ngroup_left() polardbx_cdc_up{app_kubernetes_io_instance=\"$polardbx\", namespace=\"$namespace\", app_kubernetes_io_component=\"cdc\"}", + "interval": "", + "legendFormat": "{{ pod }}", + "queryType": "randomWalk", + "range": true, + "refId": "A" + } + ], + "title": "Dumper Heap Usage", + "type": "timeseries" + } + ], + "refresh": "10s", + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "isNone": true, + "selected": false, + "text": "None", + "value": "" + }, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(kube_pod_info, cluster)", + "hide": 2, + "includeAll": false, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_pod_info, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": 
false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "selected": false, + "text": "default", + "value": "default" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "default", + "value": "default" + }, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(polardbx_up, namespace)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(polardbx_up, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/^(?!kube\\-system|.*\\-operator\\-system|monitoring|loki)/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "selected": false, + "text": "pxc-v4", + "value": "pxc-v4" + }, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(polardbx_up{namespace=\"$namespace\"}, app_kubernetes_io_instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "polardbx", + "options": [], + "query": { + "query": "label_values(polardbx_up{namespace=\"$namespace\"}, app_kubernetes_io_instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "PolarDB-X Overview", + "uid": "EByXvnuGk", + "version": 2, + "weekStart": "" +} \ No newline at end of file diff --git a/deploy/polardbx/scripts/gms-init.sql b/deploy/polardbx/scripts/gms-init.sql 
new file mode 100644 index 00000000000..496edb02233 --- /dev/null +++ b/deploy/polardbx/scripts/gms-init.sql @@ -0,0 +1,153 @@ +CREATE DATABASE IF NOT EXISTS polardbx_meta_db; +USE polardbx_meta_db; + +CREATE TABLE IF NOT EXISTS server_info ( + id BIGINT(11) NOT NULL auto_increment, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on UPDATE CURRENT_TIMESTAMP, + inst_id VARCHAR(128) NOT NULL, + inst_type INT(11) NOT NULL, + ip VARCHAR(128) NOT NULL, + port INT(11) NOT NULL, + htap_port INT(11) NOT NULL, + mgr_port INT(11) NOT NULL, + mpp_port INT(11) NOT NULL, + status INT(11) NOT NULL, + region_id VARCHAR(128) DEFAULT NULL, + azone_id VARCHAR(128) DEFAULT NULL, + idc_id VARCHAR(128) DEFAULT NULL, + cpu_core INT(11) DEFAULT NULL, + mem_size INT(11) DEFAULT NULL, + extras text DEFAULT NULL, + PRIMARY KEY (id), + UNIQUE KEY uk_inst_id_addr (inst_id, ip, port), + INDEX idx_inst_id_status (inst_id, status) +) engine = innodb DEFAULT charset = utf8; + +CREATE TABLE IF NOT EXISTS storage_info ( + id BIGINT(11) NOT NULL auto_increment, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on UPDATE CURRENT_TIMESTAMP, + inst_id VARCHAR(128) NOT NULL, + storage_inst_id VARCHAR(128) NOT NULL, + storage_master_inst_id VARCHAR(128) NOT NULL, + ip VARCHAR(128) NOT NULL, + port INT(11) NOT NULL comment 'port for mysql', + xport INT(11) DEFAULT NULL comment 'port for x-protocol', + user VARCHAR(128) NOT NULL, + passwd_enc text NOT NULL, + storage_type INT(11) NOT NULL comment '0:x-cluster, 1:mysql, 2:polardb', + inst_kind INT(11) NOT NULL comment '0:master, 1:slave, 2:metadb', + status INT(11) NOT NULL comment '0:storage ready, 1:storage not_ready', + region_id VARCHAR(128) DEFAULT NULL, + azone_id VARCHAR(128) DEFAULT NULL, + idc_id VARCHAR(128) DEFAULT NULL, + max_conn INT(11) NOT NULL, + cpu_core INT(11) DEFAULT NULL, + mem_size INT(11) 
DEFAULT NULL comment 'mem unit: MB', + is_vip INT(11) DEFAULT NULL COMMENT '0:ip is NOT vip, 1:ip is vip', + extras text DEFAULT NULL COMMENT 'reserve for extra info', + PRIMARY KEY (id), + INDEX idx_inst_id_status (inst_id, status), + UNIQUE KEY uk_inst_id_addr (storage_inst_id, ip, port, inst_kind) +) engine = innodb DEFAULT charset = utf8; + +CREATE TABLE if not exists user_priv ( + id bigint(11) NOT NULL AUTO_INCREMENT, + gmt_created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + user_name char(32) COLLATE utf8_unicode_ci NOT NULL DEFAULT '', + host char(60) COLLATE utf8_unicode_ci NOT NULL DEFAULT '', + password char(100) COLLATE utf8_unicode_ci NOT NULL, + select_priv tinyint(1) NOT NULL DEFAULT '0', + insert_priv tinyint(1) NOT NULL DEFAULT '0', + update_priv tinyint(1) NOT NULL DEFAULT '0', + delete_priv tinyint(1) NOT NULL DEFAULT '0', + create_priv tinyint(1) NOT NULL DEFAULT '0', + drop_priv tinyint(1) NOT NULL DEFAULT '0', + grant_priv tinyint(1) NOT NULL DEFAULT '0', + index_priv tinyint(1) NOT NULL DEFAULT '0', + alter_priv tinyint(1) NOT NULL DEFAULT '0', + show_view_priv int(11) NOT NULL DEFAULT '0', + create_view_priv int(11) NOT NULL DEFAULT '0', + create_user_priv int(11) NOT NULL DEFAULT '0', + meta_db_priv int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (id), + UNIQUE KEY uk (user_name, host) +) ENGINE = InnoDB DEFAULT CHARSET = utf8 COLLATE = utf8_unicode_ci COMMENT = 'Users and global privileges'; + +CREATE TABLE IF NOT EXISTS quarantine_config ( + id BIGINT(11) NOT NULL auto_increment, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on UPDATE CURRENT_TIMESTAMP, + inst_id VARCHAR(100) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL, + group_name VARCHAR(200) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL, + net_work_type VARCHAR(100) CHARACTER SET utf8 COLLATE utf8_unicode_ci 
DEFAULT NULL, + security_ip_type VARCHAR(100) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, + security_ips text CHARACTER SET utf8 COLLATE utf8_unicode_ci, + PRIMARY KEY (id), + UNIQUE KEY uk (inst_id, group_name) +) engine = innodb DEFAULT charset = utf8 comment = 'Quarantine config'; + + +CREATE TABLE IF NOT EXISTS config_listener ( + id bigint(11) NOT NULL AUTO_INCREMENT, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + data_id varchar(200) NOT NULL, + status int NOT NULL COMMENT '0:normal, 1:removed', + op_version bigint NOT NULL, + extras varchar(1024) DEFAULT NULL, + PRIMARY KEY (id), + INDEX idx_modify_ts (gmt_modified), + INDEX idx_status (status), + UNIQUE KEY uk_data_id (data_id) +) ENGINE = InnoDB DEFAULT CHARSET = utf8; + +create table if not exists inst_config ( + id bigint(11) NOT NULL AUTO_INCREMENT, + gmt_created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + inst_id varchar(128) NOT NULL, + param_key varchar(128) NOT NULL, + param_val varchar(1024) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY uk_inst_id_key (inst_id, param_key) +) ENGINE = InnoDB DEFAULT CHARSET = utf8; + +CREATE TABLE IF NOT EXISTS polardbx_extra ( + id BIGINT(11) NOT NULL auto_increment, + inst_id VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + type VARCHAR(10) NOT NULL, + comment VARCHAR(256) NOT NULL, + status INT(4) NOT NULL, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on + UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (id), + UNIQUE uk_inst_id_name_type (inst_id, name, type) +) engine = innodb DEFAULT charset = utf8 COLLATE = utf8_unicode_ci comment = 'extra table for polardbx manager'; + +CREATE TABLE IF NOT EXISTS schema_change ( + id BIGINT(11) NOT NULL AUTO_INCREMENT, + table_name varchar(64) NOT 
NULL, + version int unsigned NOT NULL, + gmt_created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (id), + UNIQUE KEY table_name (table_name) + ) ENGINE = innodb DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS k8s_topology ( + id BIGINT(11) NOT NULL AUTO_INCREMENT, + uid VARCHAR(128) NOT NULL, + name VARCHAR(128) NOT NULL, + type VARCHAR(10) NOT NULL, + gmt_created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (id), + UNIQUE KEY(uid), + UNIQUE KEY(name, type) +) ENGINE = InnoDB DEFAULT CHARSET = utf8 COLLATE = utf8_unicode_ci COMMENT = 'PolarDBX K8s Topology'; + diff --git a/deploy/polardbx/scripts/gms-metadata.tpl b/deploy/polardbx/scripts/gms-metadata.tpl new file mode 100644 index 00000000000..ca9c9cec2c2 --- /dev/null +++ b/deploy/polardbx/scripts/gms-metadata.tpl @@ -0,0 +1,23 @@ +USE polardbx_meta_db; + +INSERT IGNORE INTO schema_change(table_name, version) VALUES('user_priv', 1); +INSERT IGNORE INTO quarantine_config VALUES (NULL, NOW(), NOW(), '$KB_CLUSTER_NAME', 'default', NULL, NULL, '0.0.0.0/0'); + +INSERT IGNORE INTO config_listener (id, gmt_created, gmt_modified, data_id, status, op_version, extras) VALUES (NULL, NOW(), NOW(), 'polardbx.server.info.$KB_CLUSTER_NAME', 0, 0, NULL); +INSERT IGNORE INTO config_listener (id, gmt_created, gmt_modified, data_id, status, op_version, extras) VALUES (NULL, NOW(), NOW(), 'polardbx.storage.info.$KB_CLUSTER_NAME', 0, 0, NULL); +INSERT IGNORE INTO config_listener (id, gmt_created, gmt_modified, data_id, status, op_version, extras) VALUES (NULL, NOW(), NOW(), 'polardbx.inst.config.$KB_CLUSTER_NAME', 0, 0, NULL); +INSERT IGNORE INTO config_listener (id, gmt_created, gmt_modified, data_id, status, op_version, extras) VALUES (NULL, NOW(), NOW(), 'polardbx.quarantine.config.$KB_CLUSTER_NAME', 0, 0, NULL); +INSERT 
IGNORE INTO config_listener (id, gmt_created, gmt_modified, data_id, status, op_version, extras) VALUES (NULL, NOW(), NOW(), 'polardbx.privilege.info', 0, 0, NULL); + +INSERT IGNORE INTO inst_config (inst_id, param_key,param_val) values ('$KB_CLUSTER_NAME','CONN_POOL_XPROTO_META_DB_PORT','0'); +INSERT IGNORE INTO inst_config (inst_id, param_key,param_val) values ('$KB_CLUSTER_NAME','CDC_STARTUP_MODE','1'); +INSERT IGNORE INTO inst_config (inst_id, param_key,param_val) values ('$KB_CLUSTER_NAME','CONN_POOL_MAX_POOL_SIZE','500'); +INSERT IGNORE INTO inst_config (inst_id, param_key,param_val) values ('$KB_CLUSTER_NAME','MAX_PREPARED_STMT_COUNT','500000'); + +INSERT IGNORE INTO storage_info (id, gmt_created, gmt_modified, inst_id, storage_inst_id, storage_master_inst_id,ip, port, xport, user, passwd_enc, storage_type, inst_kind, status, region_id, azone_id, idc_id, max_conn, cpu_core, mem_size, is_vip, extras) + VALUES (NULL, NOW(), NOW(), '$KB_CLUSTER_NAME', '$GMS_SVC_NAME', '$GMS_SVC_NAME', '$GMS_HOST', '3306', '31600', '$metaDbUser', '$ENC_PASSWORD', '3', '2', '0', NULL, NULL, NULL, 10000, 4, 34359738368 , '0', ''); + +INSERT IGNORE INTO user_priv (id, gmt_created, gmt_modified, user_name, host, password, select_priv, insert_priv, update_priv, delete_priv, create_priv, drop_priv, grant_priv, index_priv, alter_priv, show_view_priv, create_view_priv, create_user_priv, meta_db_priv) + VALUES (NULL, now(), now(), '$metaDbUser', '%', '$SHA1_ENC_PASSWORD', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'); + +UPDATE config_listener SET op_version = op_version + 1 WHERE data_id = 'polardbx.privilege.info'; \ No newline at end of file diff --git a/deploy/polardbx/scripts/metadb-setup.tpl b/deploy/polardbx/scripts/metadb-setup.tpl new file mode 100644 index 00000000000..6b8e8dd4b5e --- /dev/null +++ b/deploy/polardbx/scripts/metadb-setup.tpl @@ -0,0 +1,37 @@ +#!/bin/sh + +until mysql -h$GMS_SVC_NAME -P$GMS_SVC_PORT -u$metaDbUser -p$metaDbNonEncPasswd -e 
'select 1'; do + sleep 1; + echo "wait gms ready" +done + +function generate_dn_init_sql() { + echo "$DN_HEADLESS_SVC_NAME" | tr ',' '\n' | while IFS= read -r item + do + DN_HOSTNAME=$item + DN_NAME=$(echo "$DN_HOSTNAME" | cut -d'.' -f2 | sed s/-headless//) + dn_init_sql="INSERT IGNORE INTO storage_info (id, gmt_created, gmt_modified, inst_id, storage_inst_id, storage_master_inst_id,ip, port, xport, user, passwd_enc, storage_type, inst_kind, status, region_id, azone_id, idc_id, max_conn, cpu_core, mem_size, is_vip, extras) + VALUES (NULL, NOW(), NOW(), '$KB_CLUSTER_NAME', '$DN_NAME', '$DN_NAME', '$DN_HOSTNAME', '3306', '31600', '$metaDbUser', '$ENC_PASSWORD', '3', '0', '0', NULL, NULL, NULL, 10000, 4, 34359738368 , '0', '');" + echo $dn_init_sql >> /scripts/gms-init-metadata.sql + done + echo "UPDATE config_listener SET op_version = op_version + 1 WHERE data_id = 'polardbx.storage.info.$KB_CLUSTER_NAME'" >> /scripts/gms-init-metadata.sql +} + +ENC_PASSWORD=$(echo -n "$metaDbNonEncPasswd" | openssl enc -aes-128-ecb -K "$(printf "%s" "$dnPasswordKey" | od -An -tx1 | tr -d " \n")" -base64) +SHA1_ENC_PASSWORD=$(echo -n "$metaDbNonEncPasswd" | sha1sum | cut -d ' ' -f1) +echo "export metaDbPasswd=$ENC_PASSWORD" >> /shared/env.sh + +SOURCE_CMD="mysql -h$GMS_SVC_NAME -P$GMS_SVC_PORT -u$metaDbUser -p$metaDbNonEncPasswd -e 'source /scripts/gms-init.sql'" +eval $SOURCE_CMD + +GMS_HOST=$GMS_SVC_NAME"."$KB_NAMESPACE".svc.cluster.local" + +eval "gms_metadata_sql=\"$(cat /scripts/gms-metadata.tpl)\"" + +echo $gms_metadata_sql > /scripts/gms-init-metadata.sql +generate_dn_init_sql + +cat /scripts/gms-init-metadata.sql + +eval "mysql -h$GMS_SVC_NAME -P$GMS_SVC_PORT -u$metaDbUser -p$metaDbNonEncPasswd -e 'source /scripts/gms-init-metadata.sql'" + diff --git a/deploy/polardbx/scripts/xstore-post-start.tpl b/deploy/polardbx/scripts/xstore-post-start.tpl new file mode 100644 index 00000000000..e11f09673da --- /dev/null +++ b/deploy/polardbx/scripts/xstore-post-start.tpl @@ -0,0 +1,24 
@@ +#!/bin/sh +# usage: xstore-post-start.sh type_name +# type_name: component.type, in uppercase. + +TYPE_NAME=$1 + +# setup shared-channel.json +SHARED_CHANNEL_JSON='{"nodes": [' + +i=0 +while [ $i -lt $(eval echo \$KB_"$TYPE_NAME"_N) ]; do + hostname=$(eval echo \$KB_"$TYPE_NAME"_"$i"_HOSTNAME) + pod=$(echo "$hostname" | cut -d'.' -f1) + + NODE_OBJECT=$(printf '{"pod": "%s", "host": "%s", "port": 11306, "role": "candidate", "node_name": "%s" }' "$pod" "$hostname" "$pod") + SHARED_CHANNEL_JSON+="$NODE_OBJECT," + i=$(( i + 1)) +done + +SHARED_CHANNEL_JSON=${SHARED_CHANNEL_JSON%,} +SHARED_CHANNEL_JSON+=']}' + +mkdir -p /data/shared/ +echo $SHARED_CHANNEL_JSON > /data/shared/shared-channel.json diff --git a/deploy/polardbx/scripts/xstore-setup.tpl b/deploy/polardbx/scripts/xstore-setup.tpl new file mode 100644 index 00000000000..2dcbf872e5f --- /dev/null +++ b/deploy/polardbx/scripts/xstore-setup.tpl @@ -0,0 +1,17 @@ +#!/bin/sh +# usage: xstore-setup.sh +# setup root account for xstore and run entrypoint + +function setup_account() { + until myc -e 'select 1'; do + sleep 1; + echo "wait mysql ready" + done + echo "mysql is ok" + myc -e "SET sql_log_bin=OFF;SET force_revise=ON;CREATE USER IF NOT EXISTS $KB_SERVICE_USER IDENTIFIED BY '$KB_SERVICE_PASSWORD';GRANT ALL PRIVILEGES ON *.* TO $KB_SERVICE_USER;ALTER USER $KB_SERVICE_USER IDENTIFIED BY '$KB_SERVICE_PASSWORD';" + +} + + +setup_account & +/tools/xstore/current/venv/bin/python3 /tools/xstore/current/entrypoint.py diff --git a/deploy/polardbx/templates/NOTES.txt b/deploy/polardbx/templates/NOTES.txt new file mode 100644 index 00000000000..3b27cadf8f2 --- /dev/null +++ b/deploy/polardbx/templates/NOTES.txt @@ -0,0 +1,14 @@ +Thanks for installing PolarDB-X using KubeBlocks! + +1. Run the following command to create your first PolarDB-X cluster: + +``` +kbcli cluster create pxc --cluster-definition polardbx +``` + +2. 
Port-forward service to localhost and connect to PolarDB-X cluster: + +``` +kubectl port-forward svc/pxc-cn 3306:3306 +mysql -h127.0.0.1 -upolardbx_root +``` \ No newline at end of file diff --git a/deploy/polardbx/templates/_helpers.tpl b/deploy/polardbx/templates/_helpers.tpl new file mode 100644 index 00000000000..963bdc3f623 --- /dev/null +++ b/deploy/polardbx/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "polardbx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "polardbx.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "polardbx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "polardbx.labels" -}} +helm.sh/chart: {{ include "polardbx.chart" . }} +{{ include "polardbx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "polardbx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "polardbx.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "polardbx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "polardbx.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/deploy/polardbx/templates/clusterDefintion.yaml b/deploy/polardbx/templates/clusterDefintion.yaml new file mode 100644 index 00000000000..1429045bb82 --- /dev/null +++ b/deploy/polardbx/templates/clusterDefintion.yaml @@ -0,0 +1,694 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterDefinition +metadata: + name: polardbx + labels: + {{- include "polardbx.labels" . | nindent 4 }} +spec: + connectionCredential: + username: "polardbx_root" + password: "$(RANDOM_PASSWD)" + endpoint: "$(SVC_FQDN):$(SVC_PORT_polardbx)" + host: "$(SVC_FQDN)" + port: "$(SVC_PORT_polardbx)" + metaDbPasswd: "$(RANDOM_PASSWD)" + componentDefs: + - name: gms + scriptSpecs: + - name: polardbx-scripts + templateRef: polardbx-scripts + volumeName: scripts + namespace: {{ .Release.Namespace }} + defaultMode: 0555 + workloadType: Consensus + characterType: polardbx + consensusSpec: + leader: + name: "leader" + accessMode: ReadWrite + followers: + - name: "follower" + accessMode: Readonly + updateStrategy: Parallel + probes: + roleProbe: + failureThreshold: {{ .Values.roleProbe.failureThreshold }} + periodSeconds: {{ .Values.roleProbe.periodSeconds }} + timeoutSeconds: {{ .Values.roleProbe.timeoutSeconds }} + service: + ports: + - name: mysql + port: 3306 + targetPort: 3306 + - name: metrics + port: 9104 + targetPort: 9104 + monitor: + builtIn: false + exporterConfig: + scrapePort: 9104 + scrapePath: "/metrics" + podSpec: + volumes: &xstoreVolumes + - hostPath: + path: /data/cache/tools/xstore + type: Directory + name: xstore-tools + - downwardAPI: + defaultMode: 420 + items: + - fieldRef: + apiVersion: v1 + 
fieldPath: metadata.labels + path: labels + - fieldRef: + apiVersion: v1 + fieldPath: metadata.annotations + path: annotations + - fieldRef: + apiVersion: v1 + fieldPath: metadata.annotations['runmode'] + path: runmode + - fieldRef: + apiVersion: v1 + fieldPath: metadata.name + path: name + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + name: podinfo + initContainers: &xsotreInitContainers + - name: tools-updater + command: ["/bin/ash"] + args: ["-c", "./hack/update.sh /target"] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: xstore-tools + mountPath: /target + containers: + - name: engine + command: ["/scripts/xstore-setup.sh"] + lifecycle: + postStart: + exec: + command: + - /scripts/xstore-post-start.sh + - GMS + env: &xstoreEngineEnv + - name: LANG + value: en_US.utf8 + - name: LC_ALL + value: en_US.utf8 + - name: ENGINE + value: galaxy + - name: ENGINE_HOME + value: /opt/galaxy_engine + - name: NODE_ROLE + value: candidate + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: LIMITS_CPU + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.cpu + divisor: "1m" + - name: LIMITS_MEM + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.memory + - name: PORT_MYSQL + value: "3306" + - name: PORT_PAXOS + value: "11306" + - name: PORT_POLARX + value: "31600" + - name: KB_SERVICE_USER + value: "polardbx_root" + - name: KB_SERVICE_PASSWORD + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: RSM_COMPATIBILITY_MODE + value: "true" + ports: &xstoreEnginePorts + - 
name: mysql + containerPort: 3306 + - name: paxos + containerPort: 11306 + - name: polarx + containerPort: 31600 + startupProbe: + failureThreshold: 60 + tcpSocket: + port: mysql + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 30 + volumeMounts: &xstoreEngineVolumeMounts + - name: data + mountPath: /data/mysql + - name: data-log + mountPath: /data-log/mysql + - name: xstore-tools + mountPath: /tools/xstore + - name: scripts + mountPath: /scripts/xstore-post-start.sh + subPath: xstore-post-start.sh + - name: scripts + mountPath: /scripts/xstore-setup.sh + subPath: xstore-setup.sh + - name: podinfo + mountPath: /etc/podinfo + - name: exporter + imagePullPolicy: IfNotPresent + ports: + - name: metrics + containerPort: 9104 + protocol: TCP + env: + - name: "MYSQL_MONITOR_USER" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + - name: "MYSQL_MONITOR_PASSWORD" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + - name: "DATA_SOURCE_NAME" + value: "$(MYSQL_MONITOR_USER):$(MYSQL_MONITOR_PASSWORD)@(localhost:3306)/" + - name: dn + scriptSpecs: + - name: polardbx-scripts + templateRef: polardbx-scripts + volumeName: scripts + namespace: {{ .Release.Namespace }} + defaultMode: 0555 + workloadType: Consensus + characterType: polardbx + componentDefRef: + - &gmsRef + componentDefName: gms + componentRefEnv: + - name: GMS_SVC_PORT + valueFrom: + type: FieldRef + fieldPath: $.componentDef.service.ports[?(@.name == "mysql")].port + - name: GMS_SVC_NAME + valueFrom: + type: ServiceRef + consensusSpec: + leader: + name: "leader" + accessMode: ReadWrite + followers: + - name: "follower" + accessMode: Readonly + updateStrategy: Parallel + probes: + roleProbe: + failureThreshold: {{ .Values.roleProbe.failureThreshold }} + periodSeconds: {{ .Values.roleProbe.periodSeconds }} + timeoutSeconds: {{ .Values.roleProbe.timeoutSeconds }} + service: + ports: + - name: mysql + port: 3306 + 
targetPort: 3306 + monitor: + builtIn: false + exporterConfig: + scrapePort: 9104 + scrapePath: "/metrics" + podSpec: + volumes: *xstoreVolumes + initContainers: *xsotreInitContainers + containers: + - name: engine + command: [ "/scripts/xstore-setup.sh" ] + lifecycle: + postStart: + exec: + command: + - /scripts/xstore-post-start.sh + - DN + env: *xstoreEngineEnv + ports: *xstoreEnginePorts + startupProbe: + failureThreshold: 60 + tcpSocket: + port: mysql + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 30 + volumeMounts: *xstoreEngineVolumeMounts + - name: exporter + imagePullPolicy: IfNotPresent + ports: + - name: metrics + containerPort: 9104 + protocol: TCP + env: + - name: "MYSQL_MONITOR_USER" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + - name: "MYSQL_MONITOR_PASSWORD" + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + - name: "DATA_SOURCE_NAME" + value: "$(MYSQL_MONITOR_USER):$(MYSQL_MONITOR_PASSWORD)@(localhost:3306)/" + - name: cn + scriptSpecs: + - name: polardbx-scripts + templateRef: polardbx-scripts + volumeName: scripts + namespace: {{ .Release.Namespace }} + defaultMode: 0555 + workloadType: Stateless + characterType: mysql + componentDefRef: + - *gmsRef + - componentDefName: dn + componentRefEnv: + - name: DN_SVC_PORT + valueFrom: + type: FieldRef + fieldPath: $.componentDef.service.ports[?(@.name == "mysql")].port + - name: DN_HEADLESS_SVC_NAME + valueFrom: + type: HeadlessServiceRef + format: $(POD_FQDN){{ .Values.clusterDomain }} + joinWith: "," + service: + ports: + - name: mysql + port: 3306 + targetPort: 3306 + - name: metrics + port: 9104 + targetPort: 9104 + monitor: + builtIn: false + exporterConfig: + scrapePort: 9104 + scrapePath: "/metrics" + podSpec: + shareProcessNamespace: true # For jmx collector + volumes: + - name: shared + emptyDir: {} + initContainers: + - name: metadb-init + command: ["/scripts/metadb-setup.sh"] + 
env: + - name: metaDbAddr + value: "$(GMS_SVC_NAME):$(GMS_SVC_PORT)" + - name: metaDbName + value: "polardbx_meta_db" + - name: metaDbUser + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + optional: false + - name: metaDbNonEncPasswd + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: dnPasswordKey + value: "$(metaDbNonEncPasswd)$(metaDbNonEncPasswd)" + - name: switchCloud + value: aliyun + - name: metaDbConn + value: "mysql -h$(GMS_SVC_NAME) -P3306 -u$(metaDbUser) -p$(metaDbNonEncPasswd) -D$(metaDbName)" + volumeMounts: + - name: scripts + mountPath: /scripts/metadb-setup.sh + subPath: metadb-setup.sh + - name: scripts + mountPath: /scripts/gms-init.sql + subPath: gms-init.sql + - name: scripts + mountPath: /scripts/gms-metadata.tpl + subPath: gms-metadata.tpl + - name: shared + mountPath: /shared + - name: init + command: [ "sh" ] + args: [ "-c", 'source /shared/env.sh && /polardbx-init' ] + env: &cnEngineEnv + - name: POD_ID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: metaDbAddr + value: "$(GMS_SVC_NAME):$(GMS_SVC_PORT)" + - name: metaDbName + value: "polardbx_meta_db" + - name: metaDbUser + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + optional: false + - name: metaDbNonEncPasswd + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: switchCloud + value: aliyun + - name: metaDbConn + value: "mysql -h$(GMS_SVC_NAME) -P3306 -u$(metaDbUser) -p$(metaDbPasswd) -D$(metaDbName)" + - name: dnPasswordKey + value: "$(metaDbNonEncPasswd)$(metaDbNonEncPasswd)" + - name: metaDbXprotoPort + value: 
"0" + - name: storageDbXprotoPort + value: "0" + - name: instanceId + value: "$(KB_CLUSTER_NAME)" + - name: instanceType + value: "0" + - name: serverPort + value: "3306" + - name: mgrPort + value: "3406" + - name: mppPort + value: "3506" + - name: htapPort + value: "3606" + - name: logPort + value: "8507" + - name: ins_id + value: dummy + - name: polarx_dummy_log_port + value: "$(logPort)" + - name: polarx_dummy_ssh_port + value: "-1" + - name: cpuCore + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.cpu + - name: memSize + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.memory + - name: cpu_cores + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.cpu + - name: memory + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.memory + - name: galaxyXProtocol + value: "1" + - name: processorHandler + value: "1" + - name: processors + value: "1" + - name: serverExecutor + value: "1024" + - name: TDDL_OPTS + value: -Dpod.id=$(POD_ID) -XX:+UnlockExperimentalVMOptions -XX:+UseWisp2 -Dio.grpc.netty.shaded.io.netty.transport.noNative=true + -Dio.netty.transport.noNative=true -DinstanceVersion=8.0.3 + volumeMounts: + - name: shared + mountPath: /shared + containers: + - name: engine + command: + - /bin/bash + - -c + args: + - "source /shared/env.sh && /home/admin/entrypoint.sh 20" + env: *cnEngineEnv + ports: + - containerPort: 3306 + name: mysql + protocol: TCP + - containerPort: 3406 + name: mgr + protocol: TCP + - containerPort: 3506 + name: mpp + protocol: TCP + - containerPort: 3606 + name: htap + protocol: TCP + - containerPort: 8507 + name: log + protocol: TCP + startupProbe: + failureThreshold: 60 + tcpSocket: + port: mysql + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 30 + livenessProbe: + failureThreshold: 60 + tcpSocket: + port: mysql + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 30 + 
readinessProbe: + failureThreshold: 60 + tcpSocket: + port: mysql + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 30 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: polardbx-log + mountPath: /home/admin/drds-server/logs + - name: polardbx-spill + mountPath: /home/admin/drds-server/spill + - name: shared + mountPath: /shared + - name: exporter + args: + - -collectors.process + - -collectors.jvm + - -target.type=CN + - -target.port=3406 + - -web.listen-addr=:9104 + - -web.metrics-path=/metrics + env: + - name: GOMAXPROCS + value: "1" + ports: + - containerPort: 9104 + name: metrics + protocol: TCP + volumeMounts: + - name: tmp + mountPath: /tmp + - name: cdc + scriptSpecs: + - name: polardbx-scripts + templateRef: polardbx-scripts + volumeName: scripts + namespace: {{ .Release.Namespace }} + defaultMode: 0555 + workloadType: Stateless + characterType: mysql + componentDefRef: + - *gmsRef + - componentDefName: cn + componentRefEnv: + - name: CN_SVC_PORT + valueFrom: + type: FieldRef + fieldPath: $.componentDef.service.ports[?(@.name == "mysql")].port + - name: CN_SVC_NAME + valueFrom: + type: ServiceRef + service: + ports: + - name: mysql + port: 3306 + targetPort: 3306 + - name: metrics + port: 9104 + targetPort: 9104 + monitor: + builtIn: false + exporterConfig: + scrapePort: 9104 + scrapePath: "/metrics" + podSpec: + initContainers: + - name: wait-cn-ready + command: + - bin/sh + - -c + - | + until mysql -h$CN_SVC_NAME -P$CN_SVC_PORT -u$polarx_username -p$polarx_password -e 'select 1'; do + sleep 1; + echo "cn is not ready" + done + env: + - name: polarx_username + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + optional: false + - name: polarx_password + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + containers: + - name: engine + env: + - name: switchCloud + value: aliyun + - name: cluster_id + value: "$(KB_CLUSTER_NAME)" + - 
name: ins_id + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: daemonPort + value: "3300" + - name: common_ports + value: '{"cdc1_port":"3009","cdc3_port":"3011","cdc2_port":"3010","cdc6_port":"3014","cdc5_port":"3013","cdc4_port":"3012"}' + - name: metaDb_url + value: "jdbc:mysql://$(GMS_SVC_NAME):$(GMS_SVC_PORT)/polardbx_meta_db?useSSL=false" + - name: polarx_url + value: "jdbc:mysql://$(CN_SVC_NAME):$(CN_SVC_PORT)/__cdc__?useSSL=false" + - name: metaDb_username + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + optional: false + - name: metaDb_password + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: polarx_username + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: username + optional: false + - name: polarx_password + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: metaDbNonEncPasswd + valueFrom: + secretKeyRef: + name: $(CONN_CREDENTIAL_SECRET_NAME) + key: password + optional: false + - name: dnPasswordKey + value: "$(metaDbNonEncPasswd)$(metaDbNonEncPasswd)" + - name: cpu_cores + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.cpu + - name: mem_size + valueFrom: + resourceFieldRef: + containerName: engine + resource: limits.memory + divisor: "1M" + - name: disk_size + value: "10240" + - name: disk_quota + value: "10240" + volumeMounts: + - name: binlog + mountPath: /home/admin/binlog + - name: log + mountPath: /home/admin/logs + - name: exporter + args: + - -web.listen-addr=:9104 + - -web.metrics-path=/metrics + - -target.port=3007 + - -target.type=CDC + env: + - name: GOMAXPROCS + value: "1" + ports: + - containerPort: 9104 + name: metrics + protocol: TCP + diff --git a/deploy/polardbx/templates/clusterVersion.yaml b/deploy/polardbx/templates/clusterVersion.yaml new file mode 100644 index 00000000000..c8271c2f64f --- 
/dev/null +++ b/deploy/polardbx/templates/clusterVersion.yaml @@ -0,0 +1,64 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterVersion +metadata: + name: polardbx-{{ default .Chart.AppVersion .Values.clusterVersionOverride }} + labels: + {{- include "polardbx.labels" . | nindent 4 }} +spec: + clusterDefinitionRef: polardbx + componentVersions: + - componentDefRef: gms + versionsContext: + containers: + - name: engine + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.dn.name}}:{{.Values.images.polardbx.dn.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - name: exporter + image: {{ .Values.images.prom.repository }}/{{ .Values.images.prom.mysqld_exporter.name}}:{{.Values.images.prom.mysqld_exporter.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.prom.pullPolicy }} + initContainers: + - name: tools-updater + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.toolsUpdater.name }}:{{.Values.images.polardbx.toolsUpdater.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - componentDefRef: dn + versionsContext: + containers: + - name: engine + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.dn.name}}:{{.Values.images.polardbx.dn.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - name: exporter + image: {{ .Values.images.prom.repository }}/{{ .Values.images.prom.mysqld_exporter.name}}:{{.Values.images.prom.mysqld_exporter.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.prom.pullPolicy }} + initContainers: + - name: tools-updater + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.toolsUpdater.name }}:{{.Values.images.polardbx.toolsUpdater.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - componentDefRef: cn + versionsContext: + containers: + - name: engine + image: 
{{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.cn.name}}:{{.Values.images.polardbx.cn.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - name: exporter + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.exporter.name}}:{{.Values.images.polardbx.exporter.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + initContainers: + - name: init + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.init.name }}:{{.Values.images.polardbx.init.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - name: metadb-init + image: {{ .Values.images.mysql.repository }}:{{ .Values.images.mysql.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.mysql.pullPolicy }} + - componentDefRef: cdc + versionsContext: + containers: + - name: engine + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.cdc.name}}:{{.Values.images.polardbx.cdc.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + - name: exporter + image: {{ .Values.images.polardbx.repository }}/{{ .Values.images.polardbx.exporter.name}}:{{.Values.images.polardbx.exporter.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.polardbx.pullPolicy }} + initContainers: + - name: wait-cn-ready + image: {{ .Values.images.mysql.repository }}:{{ .Values.images.mysql.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.mysql.pullPolicy }} \ No newline at end of file diff --git a/deploy/polardbx/templates/configmap-dashboards.yaml b/deploy/polardbx/templates/configmap-dashboards.yaml new file mode 100644 index 00000000000..686708eb6b1 --- /dev/null +++ b/deploy/polardbx/templates/configmap-dashboards.yaml @@ -0,0 +1,19 @@ +{{- $files := .Files.Glob "dashboards/*.json" }} +{{- if $files }} +apiVersion: v1 +kind: ConfigMapList +items: +{{- range $path, 
$fileContents := $files }} +{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} +- apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ printf "grafana-%s" $dashboardName | trunc 63 | trimSuffix "-" }} + labels: + grafana_dashboard: "1" + app: {{ template "polardbx.name" $ }}-grafana +{{ include "polardbx.labels" $ | indent 6 }} + data: + {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} +{{- end }} +{{- end }} diff --git a/deploy/polardbx/templates/scriptstemplate.yaml b/deploy/polardbx/templates/scriptstemplate.yaml new file mode 100644 index 00000000000..7aed5b4d971 --- /dev/null +++ b/deploy/polardbx/templates/scriptstemplate.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: polardbx-scripts + labels: + {{- include "polardbx.labels" . | nindent 4 }} +data: + xstore-post-start.sh: |- + {{- .Files.Get "scripts/xstore-post-start.tpl" | nindent 4 }} + xstore-setup.sh: |- + {{- .Files.Get "scripts/xstore-setup.tpl" | nindent 4 }} + gms-init.sql: |- + {{- .Files.Get "scripts/gms-init.sql" | nindent 4 }} + gms-metadata.tpl: |- + {{- .Files.Get "scripts/gms-metadata.tpl" | nindent 4 }} + metadb-setup.sh: |- + {{- .Files.Get "scripts/metadb-setup.tpl" | nindent 4 }} \ No newline at end of file diff --git a/deploy/polardbx/values.yaml b/deploy/polardbx/values.yaml new file mode 100644 index 00000000000..d16bade0141 --- /dev/null +++ b/deploy/polardbx/values.yaml @@ -0,0 +1,61 @@ +# Default values for PolarDB-X. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +clusterVersionOverride: "" + +roleProbe: + failureThreshold: 2 + periodSeconds: 1 + timeoutSeconds: 1 + +# Related image configurations. +images: + polardbx: + pullPolicy: IfNotPresent + # Repo of polardbx default images. Default is polardbx. + repository: polardbx + + # Images for xstore(DN) tools updater. 
+ toolsUpdater: + name: xstore-tools + tag: latest + + # Image for DN engine + dn: + name: polardbx-engine-2.0 + tag: latest + + # Image for CN engine + cn: + name: polardbx-sql + tag: latest + + # Image for CN initialization + init: + name: polardbx-init + tag: latest + + # Image for CN engine + cdc: + name: polardbx-cdc + tag: latest + + # Image for CN&CDC exporter + exporter: + name: polardbx-exporter + tag: latest + + # Tool image settings for gms initialization + mysql: + repository: mysql + pullPolicy: IfNotPresent + tag: "8.0.30" + + # Images for DN exporter + prom: + repository: prom + pullPolicy: IfNotPresent + mysqld_exporter: + name: mysqld-exporter + tag: v0.14.0 From 62b25601b62bde590129b5a310b6284c3534bfb5 Mon Sep 17 00:00:00 2001 From: yuanyuan zhang <111744220+michelle-0808@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:31:03 +0800 Subject: [PATCH 25/58] docs: update pg and config docs (#5226) --- .../configuration/configuration.md | 34 +++-- .../configuration/configuration.md | 48 ++++--- .../configuration/configuration.md | 135 +++--------------- .../restart-a-postgresql-cluster.md | 4 +- .../configuration/configuration.md | 129 +++-------------- .../create-pulsar-cluster-on-kubeblocks.md | 5 +- .../configuration/configuration.md | 89 +++--------- .../configuration/configuration.md | 133 +++-------------- 8 files changed, 116 insertions(+), 461 deletions(-) diff --git a/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md b/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md index 310c51c6db9..848d04defa3 100644 --- a/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-kafka/configuration/configuration.md @@ -7,7 +7,7 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter 
configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter configuration, searching the parameter user guide, and validating parameter effectiveness. From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. @@ -61,14 +61,12 @@ You can also view the details of this configuration file and parameters. * Allowed Values: It defines the valid value range of this parameter. - * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter reconfiguration takes effect. Currerntly, Kafka only supports static strategy, i.e. `Dynamic` is `false`. Restarting is required to make reconfiguration effective since using kbcli to configure parameters triggers broker restarting. + * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter configuration takes effect. Currently, Kafka only supports static strategy, i.e. `Dynamic` is `false`. Restarting is required to make configuration effective. * Description: It describes the parameter definition. -## Reconfigure parameters with configure command - -### Reconfigure static parameters - -Static parameter reconfiguring requires restarting the pod. +## Configure parameters + +### Configure parameters with configure command 1. View the current value of `log.cleanup.policy`. 
:::note - Make sure the value you set is within the Allowed Values of this parameter. Otherwise, the reconfiguration may fail. + Make sure the value you set is within the Allowed Values of this parameter. Otherwise, the configuration may fail. ::: -3. View the status of the parameter reconfiguration. +3. View the status of the parameter configuration. - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and Conditions show the details. + `Status.Progress` and `Status.Status` shows the overall status of the parameter configuration and Conditions show the details. - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. + When the `Status.Status` shows `Succeed`, the configuration is completed.

@@ -139,7 +137,7 @@ Static parameter reconfiguring requires restarting the pod.
-4. View the configuration file to verify whether the parameter is modified. +4. View the configuration file to verify whether the parameter is configured as expected. The whole searching process has a 30-second delay. @@ -150,9 +148,9 @@ Static parameter reconfiguring requires restarting the pod. mykafka-reconfiguring-wvqns mykafka broker kafka-configuration-tpl server.properties Succeed restart 1/1 Sep 14,2023 16:28 UTC+0800 {"server.properties":"{\"log.cleanup.policy\":\"compact\"}"} ``` -## Reconfigure parameters with edit-config command +### Configure parameters with edit-config command -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visualized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -168,13 +166,13 @@ If there are multiple components in a cluster, use `--component` to specify a component. ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect mykafka ``` :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. +2. Deleting a parameter will be supported later. ::: ## View history and compare differences -After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. 
+After the configuration is completed, you can search the configuration history and compare the parameter differences. -View the parameter reconfiguration history. +View the parameter configuration history. ```bash kbcli cluster describe-config mykafka diff --git a/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md b/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md index 2d82626abbc..d87950cc4bc 100644 --- a/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-mongodb/configuration/configuration.md @@ -7,28 +7,28 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter configuration, searching the parameter user guide, and validating parameter effectiveness. From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. ## View parameter information -View the current configuration file of a cluster. +* View the current configuration file of a cluster. 
-```bash -kbcli cluster describe-config mongodb-cluster -> -ConfigSpecs Meta: -CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER -mongodb-config keyfile false mongodb5.0-config-template mongodb-config-constraints mongodb-cluster-replicaset-mongodb-config replicaset mongodb-cluster -mongodb-config mongodb.conf true mongodb5.0-config-template mongodb-config-constraints mongodb-cluster-replicaset-mongodb-config replicaset mongodb-cluster -mongodb-metrics-config metrics-config.yaml false mongodb-metrics-config mongodb-cluster-replicaset-mongodb-metrics-config replicaset mongodb-cluster - -History modifications: -OPS-NAME CLUSTER COMPONENT CONFIG-SPEC-NAME FILE STATUS POLICY PROGRESS CREATED-TIME VALID-UPDATED -``` + ```bash + kbcli cluster describe-config mongodb-cluster + > + ConfigSpecs Meta: + CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER + mongodb-config keyfile false mongodb5.0-config-template mongodb-config-constraints mongodb-cluster-replicaset-mongodb-config replicaset mongodb-cluster + mongodb-config mongodb.conf true mongodb5.0-config-template mongodb-config-constraints mongodb-cluster-replicaset-mongodb-config replicaset mongodb-cluster + mongodb-metrics-config metrics-config.yaml false mongodb-metrics-config mongodb-cluster-replicaset-mongodb-metrics-config replicaset mongodb-cluster + + History modifications: + OPS-NAME CLUSTER COMPONENT CONFIG-SPEC-NAME FILE STATUS POLICY PROGRESS CREATED-TIME VALID-UPDATED + ``` -From the meta information, the cluster `mongodb-cluster` has a configuration file named `mongodb.conf`. + From the meta information, the cluster `mongodb-cluster` has a configuration file named `mongodb.conf`. You can also view the details of this configuration file and parameters. @@ -38,9 +38,11 @@ You can also view the details of this configuration file and parameters. 
kbcli cluster describe-config mongodb-cluster --show-detail ``` -## Reconfigure parameters with --set flag +## Configure parameters + +### Configure parameters with configure command -The example below reconfigures velocity to 1. +The example below configures velocity to 1. 1. Adjust the values of `velocity` to 1. @@ -72,16 +74,16 @@ The example below reconfigures velocity to 1. mongodb-cluster-reconfiguring-q8ndn mongodb-cluster mongodb mongodb-config mongodb.conf Succeed restart 3/3 Apr 21,2023 18:56 UTC+0800 {"mongodb.conf":"{\"systemLog\":{\"verbosity\":\"1\"}}"}``` ``` -3. Verify change result. +3. Verify configuration result. ```bash root@mongodb-cluster-mongodb-0:/# cat etc/mongodb/mongodb.conf |grep verbosity verbosity: "1" ``` -## Reconfigure parameters with edit-config +### Configure parameters with edit-config command -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visualized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -97,13 +99,13 @@ If there are multiple components in a cluster, use `--component` to specify a component. ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect mongodb-cluster ``` :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. 
::: diff --git a/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md b/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md index 19d05dac252..63f0f72b7e2 100644 --- a/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-mysql/configuration/configuration.md @@ -7,7 +7,7 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter configuration, searching the parameter user guide, and validating parameter effectiveness. From v0.6.0, KubeBlocks supports both `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. @@ -61,16 +61,16 @@ You can also view the details of this configuration file and parameters. * Allowed Values: It defines the valid value range of this parameter. - * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter reconfiguration takes effect. There are two different reconfiguration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. - * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be updated online. 
Follow the instructions in [Reconfigure dynamic parameters](#reconfigure-dynamic-parameters). - * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). + * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter configuration takes effect. There are two different configuration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. + * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be configured online. Follow the instructions in [Configure dynamic parameters](#configure-dynamic-parameters). + * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make configuration effective. Follow the instructions in [Configure static parameters](#configure-static-parameters). * Description: It describes the parameter definition. -## Reconfigure parameters with --set flag +## Configure parameters -### Reconfigure dynamic parameters +### Configure parameters with configure command -The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. +The example below takes configuring `max_connection` and `innodb_buffer_pool_size` as an example. 1. View the current values of `max_connection` and `innodb_buffer_pool_size`. @@ -120,9 +120,9 @@ The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. ::: -3. Search the status of the parameter reconfiguration. +3. Search the status of the parameter configuration. - `Status.Progress` shows the overall status of the parameter reconfiguration and `Conditions` show the details. + `Status.Progress` shows the overall status of the parameter configuration and `Conditions` show the details. 
```bash kbcli cluster describe-ops mysql-cluster-reconfiguring-z2wvn -n default @@ -158,7 +158,7 @@ The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. -4. Connect to the database to verify whether the parameters are modified. +4. Connect to the database to verify whether the parameters are configured as expected. The whole searching process has a 30-second delay since it takes some time for kubelet to synchronize modifications to the volume of the pod. @@ -188,110 +188,9 @@ The example below reconfigures `max_connection` and `innodb_buffer_pool_size`. 1 row in set (0.00 sec) ``` -### Reconfigure static parameters +### Configure parameters with edit-config command -Static parameter reconfiguring requires restarting the pod. The following example reconfigures `ngram_token_size`. - -1. Search the current value of `ngram_token_size` and the default value is 2. - - ```bash - kbcli cluster connect mysql-cluster - ``` - - ```bash - mysql> show variables like '%ngram_token_size%'; - > - +------------------+-------+ - | Variable_name | Value | - +------------------+-------+ - | ngram_token_size | 2 | - +------------------+-------+ - 1 row in set (0.01 sec) - ``` - -2. Adjust the value of `ngram_token_size`. - - ```bash - kbcli cluster configure mysql-cluster --set=ngram_token_size=6 - ``` - - :::note - - Make sure the value you set is within the Allowed Values of this parameter. Otherwise, the reconfiguration may fail. - - ::: - -3. View the status of the parameter reconfiguration. - - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and Conditions show the details. - - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. - -
- - Output - - ```bash - # In progress - kbcli cluster describe-ops mysql-cluster-reconfiguring-nrnpf -n default - > - Spec: - Name: mysql-cluster-reconfiguring-nrnpf NameSpace: default Cluster: mysql-cluster Type: Reconfiguring - - Command: - kbcli cluster configure mysql-cluster --component-names=mysql --template-name=mysql-consensusset-config --config-file=my.cnf --set ngram_token_size=6 - - Status: - Start Time: Mar 13,2023 03:37 UTC+0800 - Duration: 22s - Status: Running - Progress: 0/1 - OBJECT-KEY STATUS DURATION MESSAGE - ``` - - ```bash - # Parameter reconfiguration is completed - kbcli cluster describe-ops mysql-cluster-reconfiguring-nrnpf -n default - > - Spec: - Name: mysql-cluster-reconfiguring-nrnpf NameSpace: default Cluster: mysql-cluster Type: Reconfiguring - - Command: - kbcli cluster configure mysql-cluster --component-names=mysql --template-name=mysql-consensusset-config --config-file=my.cnf --set ngram_token_size=6 - - Status: - Start Time: Mar 13,2023 03:37 UTC+0800 - Completion Time: Mar 13,2023 03:37 UTC+0800 - Duration: 26s - Status: Succeed - Progress: 1/1 - OBJECT-KEY STATUS DURATION MESSAGE - ``` - -
- -4. Connect to the database to verify whether the parameters are modified. - - The whole searching process has a 30-second delay since it takes some time for kubelete to synchronize modifications to the volume of the pod. - - ```bash - kbcli cluster connect mysql-cluster - ``` - - ```bash - mysql> show variables like '%ngram_token_size%'; - > - +------------------+-------+ - | Variable_name | Value | - +------------------+-------+ - | ngram_token_size | 6 | - +------------------+-------+ - 1 row in set (0.09 sec) - ``` - -## Reconfigure parameters with edit-config - -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visulized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -310,13 +209,13 @@ The following steps take configuring MySQL Standalone as an example. ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect mysql-cluster @@ -325,15 +224,15 @@ The following steps take configuring MySQL Standalone as an example. :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. +2. Deleting a parameter will be supported later. ::: ## View history and compare differences -After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. +After the configuration is completed, you can search the configuration history and compare the parameter differences. 
-View the parameter reconfiguration history. +View the parameter configuration history. ```bash kbcli cluster describe-config mysql-cluster diff --git a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/restart-a-postgresql-cluster.md b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/restart-a-postgresql-cluster.md index 6ac3ff8d308..904bc06ba93 100644 --- a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/restart-a-postgresql-cluster.md +++ b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/restart-a-postgresql-cluster.md @@ -28,7 +28,7 @@ Restarting a PostgreSQL cluster triggers a concurrent restart and the leader may Configure the values of `components` and `ttlSecondsAfterSucceed` and run the command below to restart a specified cluster. ```bash - kbcli cluster restart NAME --components="pg-replication" \ + kbcli cluster restart NAME --components="postgresql" \ --ttlSecondsAfterSucceed=30 ``` @@ -49,7 +49,7 @@ Restarting a PostgreSQL cluster triggers a concurrent restart and the leader may clusterRef: pg-cluster type: Restart restart: - - componentName: pg-replication + - componentName: postgresql EOF ``` diff --git a/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md b/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md index 0911e788bc6..80d43cb2362 100644 --- a/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-postgresql/configuration/configuration.md @@ -7,7 +7,7 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. 
+The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified interface to facilitate managing parameter configuration, searching the parameter user guide, and validating parameter effectiveness. From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. @@ -60,16 +60,16 @@ You can also view the details of this configuration file and parameters. * Allowed Values: It defines the valid value range of this parameter. - * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter reconfiguration takes effect. There are two different reconfiguration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. - * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be updated online. Follow the instructions in [Reconfigure dynamic parameters](#reconfigure-dynamic-parameters). - * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). + * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter configuration takes effect. There are two different configuration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. + * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be configured online. Follow the instructions in [Configure dynamic parameters](#configure-dynamic-parameters). 
+ * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make configuration effective. Follow the instructions in [Configure static parameters](#configure-static-parameters). * Description: It describes the parameter definition. -## Reconfigure parameters with config command +## Configure parameters -### Reconfigure dynamic parameters +### Configure parameters with configure command -The example below reconfigures `max_connections`. +The example below takes configuring `max_connections` as an example. 1. View the current values of `max_connections`. @@ -105,11 +105,11 @@ The example below reconfigures `max_connections`. ::: -3. View the status of the parameter reconfiguration. +3. View the status of the parameter configuration. - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and `Conditions` show the details. + `Status.Progress` and `Status.Status` shows the overall status of the parameter configuration and `Conditions` show the details. - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. + When the `Status.Status` shows `Succeed`, the configuration is completed. ```bash kbcli cluster describe-ops pg-cluster-reconfiguring-fq6q7 -n default @@ -146,7 +146,7 @@ The example below reconfigures `max_connections`. -4. Connect to the database to verify whether the parameters are modified. +4. Connect to the database to verify whether the parameter is configured as expected. The whole searching process has a 30-second delay since it takes some time for kubelet to synchronize modifications to the volume of the pod. @@ -162,102 +162,9 @@ The example below reconfigures `max_connections`. (1 row) ``` -### Reconfigure static parameters +### Configure parameters with edit-config command -The example below reconfigures `shared_buffers`. - -1. View the current values of `shared_buffers`. 
- - ```bash - kbcli cluster connect pg-cluster - ``` - - ```bash - postgres=# show shared_buffers; - shared_buffers - ---------------- - 256MB - (1 row) - ``` - -2. Adjust the values of `shared_buffers`. - - ```bash - kbcli cluster configure pg-cluster --set=shared_buffers=512M - ``` - - :::note - - Make sure the value you set is within the Allowed Values of this parameter. If you set a value that does not meet the value range, the system prompts an error. For example, - - ```bash - kbcli cluster configure pg-cluster --set=shared_buffers=5M - error: failed to validate updated config: [failed to cue template render configure: [pg.maxclients: invalid value 5 (out of bound 16-107374182): - 343:34 - ] - ] - ``` - - ::: - -3. View the status of the parameter reconfiguration. - - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and `Conditions` show the details. - - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. - - ```bash - kbcli cluster describe-ops pg-cluster-reconfiguring-rcnzb -n default - ``` - -
- - Output - - ```bash - Spec: - Name: pg-cluster-reconfiguring-rcnzb NameSpace: default Cluster: pg-cluster Type: Reconfiguring - - Command: - kbcli cluster configure pg-cluster --components=postgresql --config-spec=postgresql-configuration --config-file=postgresql.conf --set shared_buffers=512M --namespace=default - - Status: - Start Time: Mar 17,2023 19:31 UTC+0800 - Duration: 2s - Status: Running - Progress: 0/1 - OBJECT-KEY STATUS DURATION MESSAGE - - Conditions: - LAST-TRANSITION-TIME TYPE REASON STATUS MESSAGE - Mar 17,2023 19:31 UTC+0800 Progressing OpsRequestProgressingStarted True Start to process the OpsRequest: pg-cluster-reconfiguring-rcnzb in Cluster: pg-cluster - Mar 17,2023 19:31 UTC+0800 Validated ValidateOpsRequestPassed True OpsRequest: pg-cluster-reconfiguring-rcnzb is validated - Mar 17,2023 19:31 UTC+0800 Reconfigure ReconfigureStarted True Start to reconfigure in Cluster: pg-cluster, Component: postgresql - Mar 17,2023 19:31 UTC+0800 ReconfigureMerged ReconfigureMerged True Reconfiguring in Cluster: pg-cluster, Component: postgresql, ConfigSpec: postgresql-configuration, info: updated: map[postgresql.conf:{"shared_buffers":"512M"}], added: map[], deleted:map[] - Mar 17,2023 19:31 UTC+0800 ReconfigureRunning ReconfigureRunning True Reconfiguring in Cluster: pg-cluster, Component: postgresql, ConfigSpec: postgresql-configuration - ``` - -
- -4. Connect to the database to verify whether the parameters are modified. - - The whole searching process has a 30-second delay since it takes some time for kubelete to synchronize modifications to the volume of the pod. - - ```bash - kbcli cluster connect pg-cluster - ``` - - ```bash - postgres=# show shared_buffers; - shared_buffers - ---------------- - 512MB - (1 row) - ``` - -## Reconfigure parameters with edit-config command - -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visulized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -273,13 +180,13 @@ If there are multiple components in a cluster, use `--component` to specify a co ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect pg-cluster @@ -288,15 +195,15 @@ If there are multiple components in a cluster, use `--component` to specify a co :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. +2. Deleting a parameter will be supported later. ::: ## View history and compare differences -After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. +After the configuration is completed, you can search the configuration history and compare the parameter differences. -View the parameter reconfiguration history. +View the parameter configuration history. 
```bash kbcli cluster describe-config pg-cluster diff --git a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md index a37e2a10b36..d0a04d1116e 100644 --- a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md +++ b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md @@ -15,13 +15,14 @@ KubeBlocks supports Pulsar's daily operations, including basic lifecycle operati ## Environment Recommendation Refer to the Pulsar official document for the configuration, such as memory, cup, and storage, of each component. + | Components | Replicas | | :-------------------- | :------------------------------------------------------------------------ | | zookeeper | 1 for test environment or 3 for production environment | | bookies | at lease 3 for test environment, at lease 4 for production environment | | broker | at least 1, for production environment, 3 replicas recommended | -| recovery(Optional) | 1; if autoRecovery is not enabled for bookie, at least 3 replicas needed | -| proxy(Optional) | 1; and for production environment, 3 replicas needed | +| recovery (Optional) | 1; if autoRecovery is not enabled for bookie, at least 3 replicas needed | +| proxy (Optional) | 1; and for production environment, 3 replicas needed | ## Create Pulsar cluster diff --git a/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md b/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md index ab7f997a21d..1f0983f4b6b 100644 --- a/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-pulsar/configuration/configuration.md @@ -40,12 +40,17 @@ kbcli cluster describe-config pulsar * View the parameter description. 
```bash - kbcli cluster explain-config pulsar |head -n 20 + kbcli cluster explain-config pulsar | head -n 20 ``` -## Reconfigure environment parameters +## Configure parameters + +### Configure parameters with configure command + +#### Configure environment parameters ***Steps*** + 1. You need to specify the component name to configure parameters. Get the pulsar cluster component name. ```bash @@ -87,9 +92,9 @@ kbcli cluster describe-config pulsar kubectl get pod -l app.kubernetes.io/name=pulsar ``` -## Reconfigure dynamic parameters +#### Configure other parameters -The following steps take the reconfiguration of dynamic parameter `brokerShutdownTimeoutMs` as an example. +The following steps take the configuration of dynamic parameter `brokerShutdownTimeoutMs` as an example. ***Steps*** @@ -105,7 +110,7 @@ The following steps take the reconfiguration of dynamic parameter `brokerShutdow broker-config broker.conf true pulsar-broker-config-tpl brokers-config-constraints pulsar-broker-broker-config broker pulsar ``` -2. Reconfigure parameters. +2. Configure parameters. ```bash kbcli cluster configure pulsar --component=broker --config-spec=broker-config --set brokerShutdownTimeoutMs=66600 @@ -138,69 +143,9 @@ The following steps take the reconfiguration of dynamic parameter `brokerShutdow OBJECT-KEY STATUS DURATION MESSAGE ``` -## Reconfigure static parameters - -Static parameter reconfiguring requires restarting the pod. The following example reconfigures `lostBookieRecoveryDelay`. - -1. Get the current configuration. 
- - ```bash - kbcli cluster explain-config pulsar --component=broker - > - ConfigSpecs Meta: - CONFIG-SPEC-NAME FILE ENABLED TEMPLATE CONSTRAINT RENDERED COMPONENT CLUSTER - agamotto-configuration agamotto-config.yaml false pulsar-agamotto-conf-tpl pulsar-broker-agamotto-configuration broker pulsar - broker-env conf true pulsar-broker-env-tpl pulsar-env-constraints pulsar-broker-broker-env broker pulsar - broker-config broker.conf true pulsar-broker-config-tpl brokers-config-constraints pulsar-broker-broker-config broker pulsar - ``` - -2. Adjust the value of `lostBookieRecoveryDelay`. - - ```bash - kbcli cluster configure pulsar --component=broker --config-spec=broker-config --set lostBookieRecoveryDelay=1000 - ``` - - :::note - - The change of parameters may cause the restart of the cluster. Enter `yes` to confirm it. - - ::: - - ***Example*** - - ```bash - kbcli cluster configure pulsar --component=broker --config-spec=broker-config --set lostBookieRecoveryDelay=1000 - > - Warning: The parameter change incurs a cluster restart, which brings the cluster down for a while. Enter to continue... - Please type "yes" to confirm: yes - Will updated configure file meta: - ConfigSpec: broker-config ConfigFile: broker.conf ComponentName: broker ClusterName: pulsar - OpsRequest pulsar-reconfiguring-gmz7w created successfully, you can view the progress: - kbcli cluster describe-ops pulsar-reconfiguring-gmz7w -n default - ``` - -3. View the status of the parameter reconfiguration. 
- - ```bash - kbcli cluster describe-ops pulsar-reconfiguring-gmz7w -n default - > - Spec: - Name: pulsar-reconfiguring-gmz7w NameSpace: default Cluster: pulsar Type: Reconfiguring - - Command: - kbcli cluster configure pulsar --components=broker --config-spec=broker-config --config-file=broker.conf --set lostBookieRecoveryDelay=1000 --namespace=default - - Status: - Start Time: Jul 20,2023 09:57 UTC+0800 - Duration: 57s - Status: Running - Progress: 1/2 - OBJECT-KEY STATUS DURATION MESSAGE - ``` - -## Reconfigure parameters with edit-config +### Configure parameters with edit-config command -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visulized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -216,13 +161,13 @@ If there are multiple components in a cluster, use `--component` to specify a co ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect pulsar @@ -231,13 +176,13 @@ If there are multiple components in a cluster, use `--component` to specify a co :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. +2. Deleting a parameter will be supported later. ::: -## Reconfigure with kubectl +### Configure parameters with kubectl -Using kubectl to reconfigure pulsar cluster requires modifying the configuration file. +Using kubectl to configure pulsar cluster requires modifying the configuration file. 
***Steps*** diff --git a/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md b/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md index ad5b3966ad4..8dff5a7010d 100644 --- a/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md +++ b/docs/user_docs/kubeblocks-for-redis/configuration/configuration.md @@ -7,7 +7,7 @@ sidebar_position: 1 # Configure cluster parameters -The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter reconfiguration, searching the parameter user guide, and validating parameter effectiveness. +The KubeBlocks configuration function provides a set of consistent default configuration generation strategies for all the databases running on KubeBlocks and also provides a unified parameter configuration interface to facilitate managing parameter configuration, searching the parameter user guide, and validating parameter effectiveness. From v0.6.0, KubeBlocks supports `kbcli cluster configure` and `kbcli cluster edit-config` to configure parameters. The difference is that KubeBlocks configures parameters automatically with `kbcli cluster configure` but `kbcli cluster edit-config` provides a visualized way for you to edit parameters directly. @@ -60,16 +60,16 @@ You can also view the details of this configuration file and parameters. * Allowed Values: It defines the valid value range of this parameter. - * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter reconfiguration takes effect. There are two different reconfiguration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. - * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be updated online. 
Follow the instructions in [Reconfigure dynamic parameters](#reconfigure-dynamic-parameters). - * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make reconfiguration effective. Follow the instructions in [Reconfigure static parameters](#reconfigure-static-parameters). + * Dynamic: The value of `Dynamic` in `Configure Constraint` defines how the parameter configuration takes effect. There are two different configuration strategies based on the effectiveness type of modified parameters, i.e. **dynamic** and **static**. + * When `Dynamic` is `true`, it means the effectiveness type of parameters is **dynamic** and can be updated online. Follow the instructions in [Configure dynamic parameters](#configure-dynamic-parameters). + * When `Dynamic` is `false`, it means the effectiveness type of parameters is **static** and a pod restarting is required to make configuration effective. Follow the instructions in [Configure static parameters](#configure-static-parameters). * Description: It describes the parameter definition. -## Reconfigure parameters with --set flag +## Configure parameters -### Reconfigure dynamic parameters +### Configure parameters with configure command -The example below reconfigures `acllog-max-len`. +The example below configures `acllog-max-len`. 1. View the current values of `acllog-max-len`. @@ -108,11 +108,11 @@ The example below reconfigures `acllog-max-len`. ::: -3. View the status of the parameter reconfiguration. +3. View the status of the parameter configuration. - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and `Conditions` show the details. + `Status.Progress` and `Status.Status` shows the overall status of the parameter configuration and `Conditions` show the details. - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. 
+ When the `Status.Status` shows `Succeed`, the configuration is completed. ```bash kbcli cluster describe-ops redis-cluster-reconfiguring-zjztm -n default @@ -145,7 +145,7 @@ The example below reconfigures `acllog-max-len`. -4. Connect to the database to verify whether the parameters are modified. +4. Connect to the database to verify whether the parameter is configured as expected. The whole searching process has a 30-second delay since it takes some time for kubelet to synchronize modifications to the volume of the pod. @@ -159,106 +159,9 @@ The example below reconfigures `acllog-max-len`. 2) "256" ``` -### Reconfigure static parameters +### Configure parameters with edit-config command -The example below reconfigures `maxclients` and `databases`. - -1. View the current values of `maxclients` and `databases`. - - ```bash - kbcli cluster connect redis-cluster - ``` - - ```bash - 127.0.0.1:6379> config get parameter maxclients databases - 1) "databases" - 2) "16" - 3) "maxclients" - 4) "10000" - ``` - -2. Adjust the values of `maxclients` and `databases`. - - ```bash - kbcli cluster configure redis-cluster --component=redis --set=maxclients=20000,databases=32 - ``` - - :::note - - Make sure the value you set is within the Allowed Values of this parameter. If you set a value that does not meet the value range, the system prompts an error. For example, - - ```bash - kbcli cluster configure redis-cluster --component=redis --set=maxclients=65001 - > - error: failed to validate updated config: [failed to cue template render configure: [configuration.maxclients: 2 errors in empty disjunction: - configuration.maxclients: conflicting values 65000 and 65001: - 100:37 - 155:16 - configuration.maxclients: invalid value 65001 (out of bound <=65000): - 100:26 - ] - ] - ``` - - ::: - -3. View the status of the parameter reconfiguration. - - `Status.Progress` and `Status.Status` shows the overall status of the parameter reconfiguration and `Conditions` show the details. 
- - When the `Status.Status` shows `Succeed`, the reconfiguration is completed. - - ```bash - kbcli cluster describe-ops redis-cluster-reconfiguring-zrkq7 -n default - ``` - -
- Output - - ```bash - Spec: - Name: redis-cluster-reconfiguring-zrkq7 NameSpace: default Cluster: redis-cluster Type: Reconfiguring - - Command: - kbcli cluster configure redis-cluster --components=redis --config-spec=redis-replication-config --config-file=redis.conf --set databases=32 --set maxclients=20000 --namespace=default - - Status: - Start Time: Apr 17,2023 17:28 UTC+0800 - Duration: 2s - Status: Running - Progress: 0/1 - OBJECT-KEY STATUS DURATION MESSAGE - - Conditions: - LAST-TRANSITION-TIME TYPE REASON STATUS MESSAGE - Apr 17,2023 17:28 UTC+0800 Progressing OpsRequestProgressingStarted True Start to process the OpsRequest: redis-cluster-reconfiguring-zrkq7 in Cluster: redis-cluster - Apr 17,2023 17:28 UTC+0800 Validated ValidateOpsRequestPassed True OpsRequest: redis-cluster-reconfiguring-zrkq7 is validated - Apr 17,2023 17:28 UTC+0800 Reconfigure ReconfigureStarted True Start to reconfigure in Cluster: redis-cluster, Component: redis - Apr 17,2023 17:28 UTC+0800 ReconfigureMerged ReconfigureMerged True Reconfiguring in Cluster: redis-cluster, Component: redis, ConfigSpec: redis-replication-config, info: updated: map[redis.conf:{"databases":"32","maxclients":"20000"}], added: map[], deleted:map[] - Apr 17,2023 17:28 UTC+0800 ReconfigureRunning ReconfigureRunning True Reconfiguring in Cluster: redis-cluster, Component: redis, ConfigSpec: redis-replication-config - ``` - -
- -4. Connect to the database to verify whether the parameters are modified. - - The whole searching process has a 30-second delay since it takes some time for kubelete to synchronize modifications to the volume of the pod. - - ```bash - kbcli cluster connect redis-cluster - ``` - - ```bash - 127.0.0.1:6379> config get parameter maxclients databases - 1) "databases" - 2) "32" - 3) "maxclients" - 4) "20000" - ``` - -## Reconfigure parameters with edit-config - -For your convenience, kbcli offers a tool `edit-config` to help you to configure parameter in a visulized way. +For your convenience, KubeBlocks offers a tool `edit-config` to help you to configure parameter in a visulized way. For Linux and macOS, you can edit configuration files by vi. For Windows, you can edit files on notepad. @@ -274,13 +177,13 @@ If there are multiple components in a cluster, use `--component` to specify a co ::: -2. View the status of the parameter reconfiguration. +2. View the status of the parameter configuration. ```bash kbcli cluster describe-ops xxx -n default ``` -3. Connect to the database to verify whether the parameters are modified +3. Connect to the database to verify whether the parameters are configured as expected. ```bash kbcli cluster connect redis-cluster @@ -289,15 +192,15 @@ If there are multiple components in a cluster, use `--component` to specify a co :::note 1. For the `edit-config` function, static parameters and dynamic parameters cannot be edited at the same time. -2. Deleting a parameter will be supported in later version. +2. Deleting a parameter will be supported later. ::: ## View history and compare differences -After the reconfiguration is completed, you can search the reconfiguration history and compare the parameter differences. +After the configuration is completed, you can search the configuration history and compare the parameter differences. -View the parameter reconfiguration history. +View the parameter configuration history. 
```bash kbcli cluster describe-config redis-cluster --component=redis From 2f7922df3080c210f014842ef14b0ac3abd43f6b Mon Sep 17 00:00:00 2001 From: a le <101848970+1aal@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:44:38 +0800 Subject: [PATCH 26/58] fix: kbcli kubeblocks upgrade malformed version (#5239) --- internal/cli/cmd/kubeblocks/upgrade.go | 3 ++- internal/cli/cmd/kubeblocks/upgrade_test.go | 26 ++++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/internal/cli/cmd/kubeblocks/upgrade.go b/internal/cli/cmd/kubeblocks/upgrade.go index 3592ae09457..667ce1d5c13 100644 --- a/internal/cli/cmd/kubeblocks/upgrade.go +++ b/internal/cli/cmd/kubeblocks/upgrade.go @@ -120,7 +120,8 @@ func (o *InstallOptions) Upgrade() error { } // double check for KubeBlocks upgrade - if !o.autoApprove { + // and only check when KubeBlocks version change + if !o.autoApprove && o.Version != "" { oldVersion, err := version.NewVersion(kbVersion) if err != nil { return err diff --git a/internal/cli/cmd/kubeblocks/upgrade_test.go b/internal/cli/cmd/kubeblocks/upgrade_test.go index e3bbeb2d0d0..4dabe696c8a 100644 --- a/internal/cli/cmd/kubeblocks/upgrade_test.go +++ b/internal/cli/cmd/kubeblocks/upgrade_test.go @@ -72,7 +72,7 @@ var _ = Describe("kubeblocks upgrade", func() { Expect(o.Namespace).To(Equal("test")) }) - It("run upgrade double-check", func() { + It("double-check when version change", func() { mockDeploy := func() *appsv1.Deployment { deploy := &appsv1.Deployment{} deploy.SetLabels(map[string]string{ @@ -101,6 +101,30 @@ var _ = Describe("kubeblocks upgrade", func() { }) + It("helm ValueOpts upgrade", func() { + mockDeploy := func() *appsv1.Deployment { + deploy := &appsv1.Deployment{} + deploy.SetLabels(map[string]string{ + "app.kubernetes.io/name": types.KubeBlocksChartName, + "app.kubernetes.io/version": "0.3.0", + }) + return deploy + } + + o := &InstallOptions{ + Options: Options{ + IOStreams: streams, + HelmCfg: 
helm.NewFakeConfig(namespace), + Namespace: "default", + Client: testing.FakeClientSet(mockDeploy()), + Dynamic: testing.FakeDynamicClient(), + }, + Version: "", + } + o.ValueOpts.Values = []string{"replicaCount=2"} + Expect(o.Upgrade()).Should(Succeed()) + }) + It("run upgrade", func() { mockDeploy := func() *appsv1.Deployment { deploy := &appsv1.Deployment{} From 9b56e88b12f7fa2993b625bbb6cab9700e811cea Mon Sep 17 00:00:00 2001 From: free6om Date: Mon, 25 Sep 2023 16:19:54 +0800 Subject: [PATCH 27/58] fix: FAIL Cluster Controller test cluster Failed/Abnormal phase (#5248) --- controllers/apps/cluster_controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 2e674c3430b..56620849518 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -2500,13 +2500,13 @@ var _ = Describe("Cluster Controller", func() { // })).Should(Succeed()) By("test when clusterVersion not Available") - _ = testapps.CreateConsensusMysqlClusterDef(&testCtx, clusterDefNameRand, consensusCompDefName) clusterVersion := testapps.CreateConsensusMysqlClusterVersion(&testCtx, clusterDefNameRand, clusterVersionNameRand, consensusCompDefName) clusterVersionKey := client.ObjectKeyFromObject(clusterVersion) // mock clusterVersion unavailable Expect(testapps.GetAndChangeObj(&testCtx, clusterVersionKey, func(clusterVersion *appsv1alpha1.ClusterVersion) { clusterVersion.Spec.ComponentVersions[0].ComponentDefRef = "test-n" })()).ShouldNot(HaveOccurred()) + _ = testapps.CreateConsensusMysqlClusterDef(&testCtx, clusterDefNameRand, consensusCompDefName) Eventually(testapps.CheckObj(&testCtx, clusterVersionKey, func(g Gomega, clusterVersion *appsv1alpha1.ClusterVersion) { g.Expect(clusterVersion.Status.Phase).Should(Equal(appsv1alpha1.UnavailablePhase)) From 12ba019cdce944485712ee9f65d67f86a60df7e4 Mon Sep 17 00:00:00 2001 From: yijing 
Date: Mon, 25 Sep 2023 16:53:21 +0800 Subject: [PATCH 28/58] chore: fix typo in polardbx chart (#5252) --- deploy/polardbx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/polardbx/Chart.yaml b/deploy/polardbx/Chart.yaml index 914355e1f54..e78eb94f5a5 100644 --- a/deploy/polardbx/Chart.yaml +++ b/deploy/polardbx/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: polardbx-cluster +name: polardbx description: PolarDB-X Cluster Helm Chart for KubeBlocks. type: application From 0c94881d0589c0691facca3dd81662f77b9f53a2 Mon Sep 17 00:00:00 2001 From: yuanyuan zhang <111744220+michelle-0808@users.noreply.github.com> Date: Mon, 25 Sep 2023 19:25:41 +0800 Subject: [PATCH 29/58] docs: fix typos in mysql intro (#5255) --- .../apecloud-mysql-intro/apecloud-mysql-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user_docs/kubeblocks-for-mysql/apecloud-mysql-intro/apecloud-mysql-intro.md b/docs/user_docs/kubeblocks-for-mysql/apecloud-mysql-intro/apecloud-mysql-intro.md index 5b96addf49a..fe1e57c2774 100644 --- a/docs/user_docs/kubeblocks-for-mysql/apecloud-mysql-intro/apecloud-mysql-intro.md +++ b/docs/user_docs/kubeblocks-for-mysql/apecloud-mysql-intro/apecloud-mysql-intro.md @@ -24,7 +24,7 @@ ApeCloud MySQL supports four roles, **Leader**, **Follower**, **Candidate**, and Role | Leader |Follower | Learner | Candidate | ---- |----| ----|----|----| - **Capcity**|RW/HA|RO/HA|RO|-| + **Capability**|RW/HA|RO/HA|RO|-| ![Role_changing](./../../../img/intro_role_changing.png) From 68d818a1cd8893eaa8ddd12a40428e1cafb9844f Mon Sep 17 00:00:00 2001 From: Shanshan Date: Mon, 25 Sep 2023 22:55:27 +0800 Subject: [PATCH 30/58] fix: datascript supports specifing pods using selectors (#5257) --- apis/apps/v1alpha1/opsrequest_types.go | 7 + apis/apps/v1alpha1/zz_generated.deepcopy.go | 5 + .../bases/apps.kubeblocks.io_opsrequests.yaml | 52 +++ controllers/apps/operations/datascript.go | 307 +++++++++++------- 
.../apps/operations/datascript_test.go | 12 +- .../crds/apps.kubeblocks.io_opsrequests.yaml | 52 +++ docker/Dockerfile-datascript | 6 +- lorry/engine/engine_test.go | 4 +- lorry/engine/redis.go | 14 +- 9 files changed, 330 insertions(+), 129 deletions(-) diff --git a/apis/apps/v1alpha1/opsrequest_types.go b/apis/apps/v1alpha1/opsrequest_types.go index 1293d70b680..91b8632a4fd 100644 --- a/apis/apps/v1alpha1/opsrequest_types.go +++ b/apis/apps/v1alpha1/opsrequest_types.go @@ -354,6 +354,13 @@ type ScriptSpec struct { // +optional // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.scriptFrom" ScriptFrom *ScriptFrom `json:"scriptFrom,omitempty"` + // KubeBlocks, by default, will execute the script on the primary pod, with role=leader. + // There are some exceptions, such as Redis, which does not synchronize accounts info between primary and secondary. + // In this case, we need to execute the script on all pods, matching the selector. + // selector indicates the components on which the script is executed. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.script.selector" + Selector *metav1.LabelSelector `json:"selector,omitempty"` } type BackupSpec struct { diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index d4a7a5d3d03..04a4a3d6af3 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -3493,6 +3493,11 @@ func (in *ScriptSpec) DeepCopyInto(out *ScriptSpec) { *out = new(ScriptFrom) (*in).DeepCopyInto(*out) } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptSpec. 
diff --git a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml index 140b9cbb572..de028334380 100644 --- a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml +++ b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml @@ -418,6 +418,58 @@ spec: required: - name type: object + selector: + description: KubeBlocks, by default, will execute the script on + the primary pod, with role=leader. There are some exceptions, + such as Redis, which does not synchronize accounts info between + primary and secondary. In this case, we need to execute the + script on all pods, matching the selector. selector indicates + the components on which the script is executed. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-validations: + - message: forbidden to update spec.scriptSpec.script.selector + rule: self == oldSelf required: - componentName type: object diff --git a/controllers/apps/operations/datascript.go b/controllers/apps/operations/datascript.go index 1980521de12..c44f3518a82 100644 --- a/controllers/apps/operations/datascript.go +++ b/controllers/apps/operations/datascript.go @@ -24,6 +24,8 @@ import ( "strings" "time" + "github.com/sethvargo/go-password/password" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -97,11 +99,16 @@ func (o DataScriptOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.C } // create jobs - if job, err := buildDataScriptJob(reqCtx, cli, opsResource.Cluster, component, opsRequest, componentDef.CharacterType); err != nil { + var jobs []*batchv1.Job + if jobs, err = buildDataScriptJobs(reqCtx, cli, opsResource.Cluster, component, opsRequest, componentDef.CharacterType); err != nil { return err - } else { - return cli.Create(reqCtx.Ctx, job) } + for _, job := range jobs { + if err = cli.Create(reqCtx.Ctx, job); err != nil { + return err + } + } + return nil } // ReconcileAction implements OpsHandler.ReconcileAction @@ -114,37 +121,60 @@ func (o DataScriptOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli cluster := opsResource.Cluster spec := opsRequest.Spec.ScriptSpec - getStatusFromJobCondition := func(job *batchv1.Job) appsv1alpha1.OpsPhase { + meetsJobConditions := func(job *batchv1.Job, condType batchv1.JobConditionType, condStatus corev1.ConditionStatus) bool { for _, condition := range job.Status.Conditions { - if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue { - return appsv1alpha1.OpsSucceedPhase - } else if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue { - return appsv1alpha1.OpsFailedPhase + if condition.Type == condType && condition.Status 
== condStatus { + return true } } - return appsv1alpha1.OpsRunningPhase + return false } // retrieve job for this opsRequest jobList := &batchv1.JobList{} - err := cli.List(reqCtx.Ctx, jobList, client.InNamespace(cluster.Namespace), client.MatchingLabels(getDataScriptJobLabels(cluster.Name, spec.ComponentName, opsRequest.Name))) - if err != nil { + if err := cli.List(reqCtx.Ctx, jobList, client.InNamespace(cluster.Namespace), client.MatchingLabels(getDataScriptJobLabels(cluster.Name, spec.ComponentName, opsRequest.Name))); err != nil { return appsv1alpha1.OpsFailedPhase, 0, err - } - - if len(jobList.Items) == 0 { + } else if len(jobList.Items) == 0 { return appsv1alpha1.OpsFailedPhase, 0, fmt.Errorf("job not found") } + + var ( + expectedCount int + succedCount int + failedCount int + ) + + expectedCount = len(jobList.Items) // check job status - job := &jobList.Items[0] - phase := getStatusFromJobCondition(job) - // jobs are owned by opsRequest, so we don't need to delete them explicitly - if phase == appsv1alpha1.OpsFailedPhase { - return phase, 0, fmt.Errorf("job execution failed, please check the job log with `kubectl logs jobs/%s -n %s`", job.Name, job.Namespace) - } else if phase == appsv1alpha1.OpsSucceedPhase { - return phase, 0, nil + for _, job := range jobList.Items { + if meetsJobConditions(&job, batchv1.JobComplete, corev1.ConditionTrue) { + succedCount++ + } else if meetsJobConditions(&job, batchv1.JobFailed, corev1.ConditionTrue) { + failedCount++ + } + } + + opsStatus := appsv1alpha1.OpsRunningPhase + if succedCount == expectedCount { + opsStatus = appsv1alpha1.OpsSucceedPhase + } else if failedCount+succedCount == expectedCount { + opsStatus = appsv1alpha1.OpsFailedPhase } - return phase, time.Second, nil + + patch := client.MergeFrom(opsRequest.DeepCopy()) + opsRequest.Status.Progress = fmt.Sprintf("%d/%d", succedCount, expectedCount) + + // patch OpsRequest.status.components + if err := cli.Status().Patch(reqCtx.Ctx, opsRequest, patch); err != 
nil { + return opsStatus, time.Second, err + } + + if succedCount == expectedCount { + return appsv1alpha1.OpsSucceedPhase, 0, nil + } else if failedCount+succedCount == expectedCount { + return appsv1alpha1.OpsFailedPhase, 0, fmt.Errorf("%d job execution failed, please check the job log ", failedCount) + } + return appsv1alpha1.OpsRunningPhase, 5 * time.Second, nil } func (o DataScriptOpsHandler) ActionStartedCondition(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource) (*metav1.Condition, error) { @@ -207,127 +237,168 @@ func getTargetService(reqCtx intctrlutil.RequestCtx, cli client.Client, clusterO return serviceName, nil } -func buildDataScriptJob(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster, component *appsv1alpha1.ClusterComponentSpec, - ops *appsv1alpha1.OpsRequest, charType string) (*batchv1.Job, error) { +func buildDataScriptJobs(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster, component *appsv1alpha1.ClusterComponentSpec, + ops *appsv1alpha1.OpsRequest, charType string) ([]*batchv1.Job, error) { engineForJob, err := engine.New(charType) if err != nil || engineForJob == nil { return nil, &FastFaileError{message: err.Error()} } - envs := []corev1.EnvVar{} - // parse kb host - serviceName, err := getTargetService(reqCtx, cli, client.ObjectKeyFromObject(cluster), component.Name) - if err != nil { - return nil, &FastFaileError{message: err.Error()} - } - - envs = append(envs, corev1.EnvVar{ - Name: "KB_HOST", - Value: serviceName, - }) - - // parse username and password - secretFrom := ops.Spec.ScriptSpec.Secret - if secretFrom == nil { - secretFrom = &appsv1alpha1.ScriptSecret{ - Name: fmt.Sprintf("%s-conn-credential", cluster.Name), - PasswordKey: "password", - UsernameKey: "username", + buildJob := func(endpoint string) (*batchv1.Job, error) { + envs := []corev1.EnvVar{} + + envs = append(envs, corev1.EnvVar{ + Name: "KB_HOST", + Value: endpoint, + }) + + // parse 
username and password + secretFrom := ops.Spec.ScriptSpec.Secret + if secretFrom == nil { + secretFrom = &appsv1alpha1.ScriptSecret{ + Name: fmt.Sprintf("%s-conn-credential", cluster.Name), + PasswordKey: "password", + UsernameKey: "username", + } + } + // verify secrets exist + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: secretFrom.Name}, &corev1.Secret{}); err != nil { + return nil, &FastFaileError{message: err.Error()} } - } - // verify secrets exist - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: secretFrom.Name}, &corev1.Secret{}); err != nil { - return nil, &FastFaileError{message: err.Error()} - } - envs = append(envs, corev1.EnvVar{ - Name: "KB_USER", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - Key: secretFrom.UsernameKey, - LocalObjectReference: corev1.LocalObjectReference{ - Name: secretFrom.Name, + envs = append(envs, corev1.EnvVar{ + Name: "KB_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: secretFrom.UsernameKey, + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretFrom.Name, + }, }, }, - }, - }) - envs = append(envs, corev1.EnvVar{ - Name: "KB_PASSWD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - Key: secretFrom.PasswordKey, - LocalObjectReference: corev1.LocalObjectReference{ - Name: secretFrom.Name, + }) + envs = append(envs, corev1.EnvVar{ + Name: "KB_PASSWD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: secretFrom.PasswordKey, + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretFrom.Name, + }, }, }, - }, - }) + }) - // parse scripts - scripts, err := getScriptContent(reqCtx, cli, ops.Spec.ScriptSpec) - if err != nil { - return nil, &FastFaileError{message: err.Error()} - } + // parse scripts + scripts, err := getScriptContent(reqCtx, cli, ops.Spec.ScriptSpec) + if err != nil { + 
return nil, &FastFaileError{message: err.Error()} + } - envs = append(envs, corev1.EnvVar{ - Name: "KB_SCRIPT", - Value: strings.Join(scripts, "\n"), - }) + envs = append(envs, corev1.EnvVar{ + Name: "KB_SCRIPT", + Value: strings.Join(scripts, "\n"), + }) - jobCmdTpl, envVars, err := engineForJob.ExecuteCommand(scripts) - if err != nil { - return nil, &FastFaileError{message: err.Error()} - } - if envVars != nil { - envs = append(envs, envVars...) - } - containerImg := viper.GetString(constant.KBDataScriptClientsImage) - if len(ops.Spec.ScriptSpec.Image) != 0 { - containerImg = ops.Spec.ScriptSpec.Image - } - if len(containerImg) == 0 { - return nil, &FastFaileError{message: "image is empty"} - } + jobCmdTpl, envVars, err := engineForJob.ExecuteCommand(scripts) + if err != nil { + return nil, &FastFaileError{message: err.Error()} + } + if envVars != nil { + envs = append(envs, envVars...) + } + containerImg := viper.GetString(constant.KBDataScriptClientsImage) + if len(ops.Spec.ScriptSpec.Image) != 0 { + containerImg = ops.Spec.ScriptSpec.Image + } + if len(containerImg) == 0 { + return nil, &FastFaileError{message: "image is empty"} + } - container := corev1.Container{ - Name: "datascript", - Image: containerImg, - ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), - Command: jobCmdTpl, - Env: envs, - } + container := corev1.Container{ + Name: "datascript", + Image: containerImg, + ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), + Command: jobCmdTpl, + Env: envs, + } + randomStr, _ := password.Generate(4, 0, 0, true, false) + jobName := fmt.Sprintf("%s-%s-%s-%s", cluster.Name, "script", ops.Name, randomStr) + if len(jobName) > 63 { + jobName = jobName[:63] + } - jobName := fmt.Sprintf("%s-%s-%s", cluster.Name, "script", ops.Name) - if len(jobName) > 63 { - jobName = jobName[:63] - } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: cluster.Namespace, + }, + } - job 
:= &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: cluster.Namespace, - }, + // set backoff limit to 0, so that the job will not be restarted + job.Spec.BackoffLimit = pointer.Int32(0) + job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + job.Spec.Template.Spec.Containers = []corev1.Container{container} + + // add labels + job.Labels = getDataScriptJobLabels(cluster.Name, component.Name, ops.Name) + // add tolerations + tolerations, err := componetutil.BuildTolerations(cluster, component) + if err != nil { + return nil, &FastFaileError{message: err.Error()} + } + job.Spec.Template.Spec.Tolerations = tolerations + // add owner reference + scheme, _ := appsv1alpha1.SchemeBuilder.Build() + if err := controllerutil.SetOwnerReference(ops, job, scheme); err != nil { + return nil, &FastFaileError{message: err.Error()} + } + return job, nil } - // set backoff limit to 0, so that the job will not be restarted - job.Spec.BackoffLimit = pointer.Int32Ptr(0) - job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever - job.Spec.Template.Spec.Containers = []corev1.Container{container} + // parse kb host + var endpoint string + var job *batchv1.Job - // add labels - job.Labels = getDataScriptJobLabels(cluster.Name, component.Name, ops.Name) - // add tolerations - tolerations, err := componetutil.BuildTolerations(cluster, component) + jobs := make([]*batchv1.Job, 0) + if ops.Spec.ScriptSpec.Selector == nil { + if endpoint, err = getTargetService(reqCtx, cli, client.ObjectKeyFromObject(cluster), component.Name); err != nil { + return nil, &FastFaileError{message: err.Error()} + } + if job, err = buildJob(endpoint); err != nil { + return nil, &FastFaileError{message: err.Error()} + } + jobs = append(jobs, job) + return jobs, nil + } + + selector, err := metav1.LabelSelectorAsSelector(ops.Spec.ScriptSpec.Selector) if err != nil { return nil, &FastFaileError{message: err.Error()} } - job.Spec.Template.Spec.Tolerations = tolerations - 
// add owner reference - scheme, _ := appsv1alpha1.SchemeBuilder.Build() - if err := controllerutil.SetOwnerReference(ops, job, scheme); err != nil { + + pods := &corev1.PodList{} + if err = cli.List(reqCtx.Ctx, pods, client.InNamespace(cluster.Namespace), + client.MatchingLabels{ + constant.AppInstanceLabelKey: cluster.Name, + constant.KBAppComponentLabelKey: component.Name, + }, + client.MatchingLabelsSelector{Selector: selector}, + ); err != nil { return nil, &FastFaileError{message: err.Error()} + } else if len(pods.Items) == 0 { + return nil, &FastFaileError{message: "no pods found"} + } + + for _, pod := range pods.Items { + endpoint = pod.Status.PodIP + if job, err = buildJob(endpoint); err != nil { + return nil, &FastFaileError{message: err.Error()} + } else { + jobs = append(jobs, job) + } } - return job, nil + return jobs, nil } func getDataScriptJobLabels(cluster, component, request string) map[string]string { diff --git a/controllers/apps/operations/datascript_test.go b/controllers/apps/operations/datascript_test.go index 3a46c1568d9..5e477d7d614 100644 --- a/controllers/apps/operations/datascript_test.go +++ b/controllers/apps/operations/datascript_test.go @@ -236,7 +236,7 @@ var _ = Describe("DataScriptOps", func() { reqCtx.Req = reconcile.Request{NamespacedName: opsKey} By("mock a job, missing service, should fail") comp := clusterObj.Spec.GetComponentByName(consensusComp) - _, err := buildDataScriptJob(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") + _, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") Expect(err).Should(HaveOccurred()) By("mock a service, should pass") @@ -249,7 +249,7 @@ var _ = Describe("DataScriptOps", func() { Expect(err).Should(Succeed()) By("mock a job one more time, fail with missing secret") - _, err = buildDataScriptJob(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") + _, err = buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") Expect(err).Should(HaveOccurred()) 
Expect(err.Error()).Should(ContainSubstring("conn-credential")) @@ -263,7 +263,7 @@ var _ = Describe("DataScriptOps", func() { } Expect(k8sClient.Patch(testCtx.Ctx, ops, patch)).Should(Succeed()) - _, err = buildDataScriptJob(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") + _, err = buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") Expect(err).Should(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(secretName)) @@ -281,8 +281,9 @@ var _ = Describe("DataScriptOps", func() { By("create job, should pass") viper.Set(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-clients:latest") - job, err := buildDataScriptJob(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") + jobs, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") Expect(err).Should(Succeed()) + job := jobs[0] Expect(k8sClient.Create(testCtx.Ctx, job)).Should(Succeed()) By("reconcile the opsRequest phase") @@ -358,8 +359,9 @@ var _ = Describe("DataScriptOps", func() { By("create job, should pass") viper.Set(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-clients:latest") - job, err := buildDataScriptJob(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") + jobs, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") Expect(err).Should(Succeed()) + job := jobs[0] Expect(k8sClient.Create(testCtx.Ctx, job)).Should(Succeed()) By("reconcile the opsRequest phase") diff --git a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml index 140b9cbb572..de028334380 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml @@ -418,6 +418,58 @@ spec: required: - name type: object + selector: + description: KubeBlocks, by default, will execute the script on + the primary pod, with role=leader. There are some exceptions, + such as Redis, which does not synchronize accounts info between + primary and secondary. 
In this case, we need to execute the + script on all pods, matching the selector. selector indicates + the components on which the script is executed. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-validations: + - message: forbidden to update spec.scriptSpec.script.selector + rule: self == oldSelf required: - componentName type: object diff --git a/docker/Dockerfile-datascript b/docker/Dockerfile-datascript index 6dde6ebf653..da47d704d20 100644 --- a/docker/Dockerfile-datascript +++ b/docker/Dockerfile-datascript @@ -1,6 +1,6 @@ # Build client images for mysql and postgres to support datascripts -# Use alpine with tag 20230329 is corresponding to "edge" tag (latest release to date is 3.18) as of 20230625 -FROM docker.io/alpine:edge as dist +# The latest release to date is 3.18) as of 20230625 +FROM docker.io/alpine:3.18 as dist # ARG APK_MIRROR # install tools via apk @@ -12,6 +12,8 @@ RUN apk add --no-cache jq --allow-untrusted RUN apk add --no-cache postgresql-client --allow-untrusted RUN apk add --no-cache mysql-client mariadb-connector-c --allow-untrusted + +RUN apk add redis RUN rm -rf /var/cache/apk/* USER 65532:65532 diff --git a/lorry/engine/engine_test.go b/lorry/engine/engine_test.go index 599f54b61e1..d943a59af6e 100644 --- a/lorry/engine/engine_test.go +++ b/lorry/engine/engine_test.go @@ -53,14 +53,14 @@ var _ = Describe("Engine", func() { }) It("new execute command ", func() { - for _, typeName := range []string{stateMysql, statePostgreSQL} { + for _, typeName := range []string{stateMysql, statePostgreSQL, stateRedis} { engine, _ := New(typeName) Expect(engine).ShouldNot(BeNil()) _, _, err := engine.ExecuteCommand([]string{"some", "cmd"}) Expect(err).Should(Succeed()) } - for _, typeName := range []string{stateRedis, stateMongoDB, stateNebula} { + for _, typeName := range []string{stateMongoDB, stateNebula} { engine, _ := New(typeName) Expect(engine).ShouldNot(BeNil()) diff --git a/lorry/engine/redis.go b/lorry/engine/redis.go index 409bed81388..8228a2639c6 100644 --- a/lorry/engine/redis.go +++ b/lorry/engine/redis.go @@ -69,6 +69,16 @@ func (r redis) ConnectExample(info *ConnectionInfo, client 
string) string { return buildExample(info, client, r.examples) } -func (r redis) ExecuteCommand([]string) ([]string, []corev1.EnvVar, error) { - return nil, nil, fmt.Errorf("%s not implemented", r.info.Client) +func (r redis) ExecuteCommand(scripts []string) ([]string, []corev1.EnvVar, error) { + cmd := []string{} + args := []string{} + cmd = append(cmd, "/bin/sh", "-c") + for _, script := range scripts { + args = append(args, fmt.Sprintf("%s -h %s -p 6379 --user %s --pass %s %s", r.info.Client, + fmt.Sprintf("$%s", envVarMap[host]), + fmt.Sprintf("$%s", envVarMap[user]), + fmt.Sprintf("$%s", envVarMap[password]), script)) + } + cmd = append(cmd, strings.Join(args, " && ")) + return cmd, nil, nil } From 265dfe8439796ede8c8aed74877ae9b74165cfed Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:36:06 +0800 Subject: [PATCH 31/58] fix: failed to configuration-controller ut (#5249) (#5261) --- .../configuration/configuration_controller.go | 4 ++- .../configuration_controller_test.go | 1 + .../apps/configuration/reconcile_task.go | 1 - .../apps/operations/reconfigure_test.go | 4 +-- internal/controller/configuration/pipeline.go | 26 +++++++------- .../configuration/template_wrapper.go | 35 +++++++++++++++++++ 6 files changed, 53 insertions(+), 18 deletions(-) diff --git a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 3b82786427d..46c6fa5a951 100644 --- a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -46,7 +46,7 @@ type ConfigurationReconciler struct { Recorder record.EventRecorder } -const reconcileInterval = time.Millisecond * 10 +const reconcileInterval = time.Second * 2 //+kubebuilder:rbac:groups=apps.kubeblocks.io,resources=configurations,verbs=get;list;watch;create;update;patch;delete 
//+kubebuilder:rbac:groups=apps.kubeblocks.io,resources=configurations/status,verbs=get;update;patch @@ -149,6 +149,8 @@ func (r *ConfigurationReconciler) runTasks( errs = append(errs, err) continue } + task.Status.UpdateRevision = revision + task.Status.Phase = appsv1alpha1.CMergedPhase if err := task.SyncStatus(fetcher, task.Status); err != nil { task.Status.Phase = appsv1alpha1.CFailedPhase task.Status.Message = cfgutil.ToPointer(err.Error()) diff --git a/controllers/apps/configuration/configuration_controller_test.go b/controllers/apps/configuration/configuration_controller_test.go index 57a21717b44..9dc2fd73e97 100644 --- a/controllers/apps/configuration/configuration_controller_test.go +++ b/controllers/apps/configuration/configuration_controller_test.go @@ -86,6 +86,7 @@ var _ = Describe("Configuration Controller", func() { g.Expect(k8sClient.Get(ctx, cfgKey, cfg)).Should(Succeed()) itemStatus := cfg.Status.GetItemStatus(configSpecName) g.Expect(itemStatus).ShouldNot(BeNil()) + g.Expect(itemStatus.UpdateRevision).Should(BeEquivalentTo("2")) g.Expect(itemStatus.Phase).Should(BeEquivalentTo(appsv1alpha1.CFinishedPhase)) }, time.Second*60, time.Second*1).Should(Succeed()) }) diff --git a/controllers/apps/configuration/reconcile_task.go b/controllers/apps/configuration/reconcile_task.go index f511bb2eeb8..3f4454281b4 100644 --- a/controllers/apps/configuration/reconcile_task.go +++ b/controllers/apps/configuration/reconcile_task.go @@ -62,7 +62,6 @@ func NewTask(item appsv1alpha1.ConfigurationItemDetail, status *appsv1alpha1.Con ApplyParameters(). UpdateConfigVersion(revision). Sync(). - SyncStatus(). 
Complete() }, SyncStatus: syncStatus, diff --git a/controllers/apps/operations/reconfigure_test.go b/controllers/apps/operations/reconfigure_test.go index c005fb6d732..6afad600d5f 100644 --- a/controllers/apps/operations/reconfigure_test.go +++ b/controllers/apps/operations/reconfigure_test.go @@ -193,7 +193,7 @@ var _ = Describe("Reconfigure OpsRequest", func() { opsRes, eventContext := assureMockReconfigureData("simple") reqCtx := intctrlutil.RequestCtx{ Ctx: testCtx.Ctx, - Log: log.FromContext(ctx).WithValues("Reconfigure"), + Log: log.FromContext(ctx).WithName("Reconfigure"), Recorder: opsRes.Recorder, } @@ -258,7 +258,7 @@ var _ = Describe("Reconfigure OpsRequest", func() { opsRes, eventContext := assureMockReconfigureData("autoReload") reqCtx := intctrlutil.RequestCtx{ Ctx: testCtx.Ctx, - Log: log.FromContext(ctx).WithValues("Reconfigure"), + Log: log.FromContext(ctx).WithName("Reconfigure"), Recorder: opsRes.Recorder, } diff --git a/internal/controller/configuration/pipeline.go b/internal/controller/configuration/pipeline.go index a64a6250d98..7df38f8b7fa 100644 --- a/internal/controller/configuration/pipeline.go +++ b/internal/controller/configuration/pipeline.go @@ -145,16 +145,17 @@ func (p *pipeline) UpdateConfigurationStatus() *pipeline { } existing := p.ConfigurationObj + reversion := fromConfiguration(existing) patch := client.MergeFrom(existing) updated := existing.DeepCopy() for _, item := range existing.Spec.ConfigItemDetails { - checkAndUpdateItemStatus(updated, item) + checkAndUpdateItemStatus(updated, item, reversion) } return p.ResourceFetcher.Client.Status().Patch(p.Context, updated, patch) }) } -func checkAndUpdateItemStatus(updated *appsv1alpha1.Configuration, item appsv1alpha1.ConfigurationItemDetail) { +func checkAndUpdateItemStatus(updated *appsv1alpha1.Configuration, item appsv1alpha1.ConfigurationItemDetail, reversion string) { foundStatus := func(name string) *appsv1alpha1.ConfigurationItemDetailStatus { for i := range 
updated.Status.ConfigurationItemStatus { status := &updated.Status.ConfigurationItemStatus[i] @@ -172,8 +173,9 @@ func checkAndUpdateItemStatus(updated *appsv1alpha1.Configuration, item appsv1al if status == nil { updated.Status.ConfigurationItemStatus = append(updated.Status.ConfigurationItemStatus, appsv1alpha1.ConfigurationItemDetailStatus{ - Name: item.Name, - Phase: appsv1alpha1.CInitPhase, + Name: item.Name, + Phase: appsv1alpha1.CInitPhase, + UpdateRevision: reversion, }) } } @@ -356,25 +358,21 @@ func (p *updatePipeline) UpdateConfigVersion(revision string) *updatePipeline { if p.isDone() { return nil } + + if err := updateConfigMetaForCM(p.newCM, &p.item, revision); err != nil { + return err + } annotations := p.newCM.Annotations if annotations == nil { annotations = make(map[string]string) } - b, err := json.Marshal(p.item) - if err != nil { - return err - } - annotations[constant.ConfigAppliedVersionAnnotationKey] = string(b) - hash, _ := cfgutil.ComputeHash(p.newCM.Data) - annotations[constant.CMInsCurrentConfigurationHashLabelKey] = hash - annotations[constant.ConfigurationRevision] = revision - annotations[constant.CMConfigurationTemplateVersion] = p.item.Version + // delete disable reconcile annotation if _, ok := annotations[constant.DisableUpgradeInsConfigurationAnnotationKey]; ok { annotations[constant.DisableUpgradeInsConfigurationAnnotationKey] = strconv.FormatBool(false) } p.newCM.Annotations = annotations - p.itemStatus.UpdateRevision = revision + // p.itemStatus.UpdateRevision = revision return nil }) } diff --git a/internal/controller/configuration/template_wrapper.go b/internal/controller/configuration/template_wrapper.go index 8709e761c57..e774224c992 100644 --- a/internal/controller/configuration/template_wrapper.go +++ b/internal/controller/configuration/template_wrapper.go @@ -23,6 +23,7 @@ import ( "context" "encoding/json" "reflect" + "strconv" "strings" corev1 "k8s.io/api/core/v1" @@ -34,6 +35,7 @@ import ( appsv1alpha1 
"github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" + cfgutil "github.com/apecloud/kubeblocks/internal/configuration/util" "github.com/apecloud/kubeblocks/internal/configuration/validate" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/component" @@ -97,6 +99,7 @@ func (wrapper *renderWrapper) checkRerenderTemplateSpec(cfgCMName string, localO func (wrapper *renderWrapper) renderConfigTemplate(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, localObjs []client.Object, configuration *appsv1alpha1.Configuration) error { scheme, _ := appsv1alpha1.SchemeBuilder.Build() + revision := fromConfiguration(configuration) for _, configSpec := range component.ConfigTemplates { var item *appsv1alpha1.ConfigurationItemDetail cmName := core.GetComponentCfgName(cluster.Name, component.Name, configSpec.Name) @@ -121,10 +124,42 @@ func (wrapper *renderWrapper) renderConfigTemplate(cluster *appsv1alpha1.Cluster if err := wrapper.addRenderedObject(configSpec.ComponentTemplateSpec, newCMObj, scheme, configuration); err != nil { return err } + if err := updateConfigMetaForCM(newCMObj, item, revision); err != nil { + return err + } } return nil } +func fromConfiguration(configuration *appsv1alpha1.Configuration) string { + if configuration == nil { + return "" + } + return strconv.FormatInt(configuration.GetGeneration(), 10) +} + +func updateConfigMetaForCM(newCMObj *corev1.ConfigMap, item *appsv1alpha1.ConfigurationItemDetail, revision string) (err error) { + if item == nil { + return + } + + annotations := newCMObj.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + b, err := json.Marshal(item) + if err != nil { + return err + } + annotations[constant.ConfigAppliedVersionAnnotationKey] = string(b) + hash, _ := cfgutil.ComputeHash(newCMObj.Data) + annotations[constant.CMInsCurrentConfigurationHashLabelKey] = 
hash + annotations[constant.ConfigurationRevision] = revision + annotations[constant.CMConfigurationTemplateVersion] = item.Version + newCMObj.Annotations = annotations + return +} + func applyUpdatedParameters(item *appsv1alpha1.ConfigurationItemDetail, cm *corev1.ConfigMap, configSpec appsv1alpha1.ComponentConfigSpec, cli client.Client, ctx context.Context) (err error) { var newData map[string]string var configConstraint *appsv1alpha1.ConfigConstraint From 385591baaabf94c4e8ab96c40e466b56d0d5065a Mon Sep 17 00:00:00 2001 From: Vettal <92501707+vettalwu@users.noreply.github.com> Date: Tue, 26 Sep 2023 13:57:55 +0800 Subject: [PATCH 32/58] fix: Upgrade consensusSpec to rsmSpec for PolarDB-X. (#5259) --- .../polardbx/templates/clusterDefintion.yaml | 60 +++++++++++++++---- 1 file changed, 47 insertions(+), 13 deletions(-) diff --git a/deploy/polardbx/templates/clusterDefintion.yaml b/deploy/polardbx/templates/clusterDefintion.yaml index 1429045bb82..42a8bc7c70d 100644 --- a/deploy/polardbx/templates/clusterDefintion.yaml +++ b/deploy/polardbx/templates/clusterDefintion.yaml @@ -29,12 +29,30 @@ spec: followers: - name: "follower" accessMode: Readonly - updateStrategy: Parallel - probes: + rsmSpec: + roles: + - name: "leader" + accessMode: ReadWrite + isLeader: true + canVote: true + - name: "follower" + accessMode: Readonly + canVote: true roleProbe: - failureThreshold: {{ .Values.roleProbe.failureThreshold }} - periodSeconds: {{ .Values.roleProbe.periodSeconds }} - timeoutSeconds: {{ .Values.roleProbe.timeoutSeconds }} + roleUpdateMechanism: DirectAPIServerEventUpdate + probeActions: + - image: "arey/mysql-client:latest" + command: + - mysql + - "-h127.0.0.1" + - "-P3306" + - "-uroot" + - "-N" + - "-B" + - "-e" + - "\"select role from information_schema.alisql_cluster_local\"" + - "|" + - "xargs echo -n" service: ports: - name: mysql @@ -50,9 +68,7 @@ spec: scrapePath: "/metrics" podSpec: volumes: &xstoreVolumes - - hostPath: - path: /data/cache/tools/xstore - type: 
Directory + - emptyDir: {} name: xstore-tools - downwardAPI: defaultMode: 420 @@ -234,12 +250,30 @@ spec: followers: - name: "follower" accessMode: Readonly - updateStrategy: Parallel - probes: + rsmSpec: + roles: + - name: "leader" + accessMode: ReadWrite + isLeader: true + canVote: true + - name: "follower" + accessMode: Readonly + canVote: true roleProbe: - failureThreshold: {{ .Values.roleProbe.failureThreshold }} - periodSeconds: {{ .Values.roleProbe.periodSeconds }} - timeoutSeconds: {{ .Values.roleProbe.timeoutSeconds }} + roleUpdateMechanism: DirectAPIServerEventUpdate + probeActions: + - image: "arey/mysql-client:latest" + command: + - mysql + - "-h127.0.0.1" + - "-P3306" + - "-uroot" + - "-N" + - "-B" + - "-e" + - "\"select role from information_schema.alisql_cluster_local\"" + - "|" + - "xargs echo -n" service: ports: - name: mysql From 4bc415ca53affe7839494f7bacea40d3f454926b Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Tue, 26 Sep 2023 14:18:50 +0800 Subject: [PATCH 33/58] chore: split jihulab chart url to addons and applications (#5264) --- .github/workflows/release-helm-chart.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-helm-chart.yml b/.github/workflows/release-helm-chart.yml index 6b8e73f8cc9..e31c0e2d26d 100644 --- a/.github/workflows/release-helm-chart.yml +++ b/.github/workflows/release-helm-chart.yml @@ -37,14 +37,14 @@ jobs: release-chart: needs: chart-version - uses: apecloud/apecloud-cd/.github/workflows/release-charts.yml@v0.1.25 + uses: apecloud/apecloud-cd/.github/workflows/release-charts.yml@v0.1.28 with: MAKE_OPS: "bump-chart-ver" VERSION: "${{ needs.chart-version.outputs.chart-version }}" CHART_NAME: "kubeblocks" CHART_DIR: "deploy/helm" DEP_CHART_DIR: "deploy/helm/depend-charts" - APECD_REF: "v0.1.25" + APECD_REF: "v0.1.28" secrets: inherit release-charts-image: From 86f8ff53f20be82aa0f77f842052ce0d2f919ed8 Mon Sep 17 00:00:00 
2001 From: free6om Date: Tue, 26 Sep 2023 14:58:16 +0800 Subject: [PATCH 34/58] chore: make lorry role probe timeout configurable (#5265) --- deploy/mongodb/values.yaml | 2 +- .../controller/rsm/transformer_object_generation.go | 7 +++++++ internal/controller/rsm/types.go | 1 + lorry/binding/base.go | 10 ++++++++-- lorry/binding/types.go | 2 ++ 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/deploy/mongodb/values.yaml b/deploy/mongodb/values.yaml index b0575e276a4..103b1ad5b11 100644 --- a/deploy/mongodb/values.yaml +++ b/deploy/mongodb/values.yaml @@ -20,7 +20,7 @@ fullnameOverride: "" roleProbe: failureThreshold: 3 periodSeconds: 2 - timeoutSeconds: 1 + timeoutSeconds: 2 ## Authentication parameters ## diff --git a/internal/controller/rsm/transformer_object_generation.go b/internal/controller/rsm/transformer_object_generation.go index bf8ae37f1cf..421f2d8788e 100644 --- a/internal/controller/rsm/transformer_object_generation.go +++ b/internal/controller/rsm/transformer_object_generation.go @@ -444,6 +444,13 @@ func injectRoleProbeAgentContainer(rsm workloads.ReplicatedStateMachine, templat Value: string(roleProbe.RoleUpdateMechanism), }) + // inject role probe timeout env + env = append(env, + corev1.EnvVar{ + Name: roleProbeTimeoutVarName, + Value: strconv.Itoa(int(roleProbe.TimeoutSeconds)), + }) + // lorry related envs env = append(env, corev1.EnvVar{ diff --git a/internal/controller/rsm/types.go b/internal/controller/rsm/types.go index a7f5aaa6f0e..9912c48143b 100644 --- a/internal/controller/rsm/types.go +++ b/internal/controller/rsm/types.go @@ -89,6 +89,7 @@ const ( leaderHostVarName = "KB_RSM_LEADER_HOST" targetHostVarName = "KB_RSM_TARGET_HOST" RoleUpdateMechanismVarName = "KB_RSM_ROLE_UPDATE_MECHANISM" + roleProbeTimeoutVarName = "KB_RSM_ROLE_PROBE_TIMEOUT" directAPIServerEventFieldPath = "spec.containers{sqlchannel}" readinessProbeEventFieldPath = "spec.containers{" + roleProbeContainerName + "}" legacyEventFieldPath = 
"spec.containers{kb-checkrole}" diff --git a/lorry/binding/base.go b/lorry/binding/base.go index 5652b7dabb5..c1d71efa3c7 100644 --- a/lorry/binding/base.go +++ b/lorry/binding/base.go @@ -217,8 +217,14 @@ func (ops *BaseOperations) CheckRoleOps(ctx context.Context, req *ProbeRequest, return opsRes, nil } - // sql exec timeout needs to be less than httpget's timeout which by default 1s. - ctx1, cancel := context.WithTimeout(ctx, 999*time.Millisecond) + timeoutSeconds := defaultRoleProbeTimeoutSeconds + if viper.IsSet(roleProbeTimeoutVarName) { + timeoutSeconds = viper.GetInt(roleProbeTimeoutVarName) + } + // lorry utilizes the pod readiness probe to trigger role probe and 'timeoutSeconds' is directly copied from the 'probe.timeoutSeconds' field of pod. + // here we give 80% of the total time to role probe job and leave the remaining 20% to kubelet to handle the readiness probe related tasks. + timeout := time.Duration(timeoutSeconds) * (800 * time.Millisecond) + ctx1, cancel := context.WithTimeout(ctx, timeout) defer cancel() role, err := ops.GetRole(ctx1, req, resp) if err != nil { diff --git a/lorry/binding/types.go b/lorry/binding/types.go index c888f1bab56..95fd547f222 100644 --- a/lorry/binding/types.go +++ b/lorry/binding/types.go @@ -49,8 +49,10 @@ const ( roleEventReportFrequency = int(1 / roleEventRecordQPS) defaultFailedEventReportFrequency = 1800 defaultRoleDetectionThreshold = 300 + defaultRoleProbeTimeoutSeconds = 2 rsmRoleUpdateMechanismVarName = "KB_RSM_ROLE_UPDATE_MECHANISM" + roleProbeTimeoutVarName = "KB_RSM_ROLE_PROBE_TIMEOUT" ) const ( From 5223b2c5c6b842e9f3d8108d8e4757b6331e41a9 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 26 Sep 2023 15:32:16 +0800 Subject: [PATCH 35/58] chore: workload type validation rules in CD (#5266) --- apis/apps/v1alpha1/clusterdefinition_types.go | 2 +- .../crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml | 7 ++++--- .../helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml | 7 ++++--- 3 files changed, 9 
insertions(+), 7 deletions(-) diff --git a/apis/apps/v1alpha1/clusterdefinition_types.go b/apis/apps/v1alpha1/clusterdefinition_types.go index 6d2e9514cb6..63b66e095cf 100644 --- a/apis/apps/v1alpha1/clusterdefinition_types.go +++ b/apis/apps/v1alpha1/clusterdefinition_types.go @@ -305,7 +305,7 @@ type ServiceRefDeclarationSpec struct { // ClusterComponentDefinition provides a workload component specification template, // with attributes that strongly work with stateful workloads and day-2 operations // behaviors. -// +kubebuilder:validation:XValidation:rule="has(self.workloadType) && self.workloadType == 'Consensus' ? has(self.consensusSpec) : !has(self.consensusSpec)",message="componentDefs.consensusSpec is required when componentDefs.workloadType is Consensus, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.workloadType) && self.workloadType == 'Consensus' ? (has(self.consensusSpec) || has(self.rsmSpec)) : !has(self.consensusSpec)",message="componentDefs.consensusSpec(deprecated) or componentDefs.rsmSpec(recommended) is required when componentDefs.workloadType is Consensus, and forbidden otherwise" type ClusterComponentDefinition struct { // A component definition name, this name could be used as default name of `Cluster.spec.componentSpecs.name`, // and so this name is need to conform with same validation rules as `Cluster.spec.componentSpecs.name`, that diff --git a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml index abe37f1b831..8404c93bbb1 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml @@ -9552,10 +9552,11 @@ spec: - workloadType type: object x-kubernetes-validations: - - message: componentDefs.consensusSpec is required when componentDefs.workloadType - is Consensus, and forbidden otherwise + - message: componentDefs.consensusSpec(deprecated) or 
componentDefs.rsmSpec(recommended) + is required when componentDefs.workloadType is Consensus, and + forbidden otherwise rule: 'has(self.workloadType) && self.workloadType == ''Consensus'' - ? has(self.consensusSpec) : !has(self.consensusSpec)' + ? (has(self.consensusSpec) || has(self.rsmSpec)) : !has(self.consensusSpec)' minItems: 1 type: array x-kubernetes-list-map-keys: diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml index abe37f1b831..8404c93bbb1 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml @@ -9552,10 +9552,11 @@ spec: - workloadType type: object x-kubernetes-validations: - - message: componentDefs.consensusSpec is required when componentDefs.workloadType - is Consensus, and forbidden otherwise + - message: componentDefs.consensusSpec(deprecated) or componentDefs.rsmSpec(recommended) + is required when componentDefs.workloadType is Consensus, and + forbidden otherwise rule: 'has(self.workloadType) && self.workloadType == ''Consensus'' - ? has(self.consensusSpec) : !has(self.consensusSpec)' + ? 
(has(self.consensusSpec) || has(self.rsmSpec)) : !has(self.consensusSpec)' minItems: 1 type: array x-kubernetes-list-map-keys: From 25046df4f19135b4ad22080f4a975b031c319110 Mon Sep 17 00:00:00 2001 From: yuanyuan zhang <111744220+michelle-0808@users.noreply.github.com> Date: Tue, 26 Sep 2023 18:34:20 +0800 Subject: [PATCH 36/58] docs: update slack link (#5275) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ec565b3572a..ebd02955959 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ When adopting a multi-cloud or hybrid cloud strategy, it is essential to priorit ## Community -- KubeBlocks [Slack Channel](https://kubeblocks.slack.com/join/shared_invite/zt-22cx5p0y9-~BDNuPqxkdgswI_FSdx_8g) +- KubeBlocks [Slack Channel](https://join.slack.com/t/kubeblocks/shared_invite/zt-23vym7xpx-Xu3xcE7HmcqGKvTX4U9yTg) - KubeBlocks Github [Discussions](https://github.com/apecloud/kubeblocks/discussions) ## Contributing to KubeBlocks From 41cf72dbe703c7eb1d23b282d8fd1544ed5c439f Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Tue, 26 Sep 2023 19:33:40 +0800 Subject: [PATCH 37/58] chore: update helm chart index (#5277) --- .github/workflows/release-delete.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release-delete.yml b/.github/workflows/release-delete.yml index 7593b5aafed..2cc5cf98934 100644 --- a/.github/workflows/release-delete.yml +++ b/.github/workflows/release-delete.yml @@ -15,15 +15,15 @@ run-name: Delete Release:${{ inputs.release-version }} jobs: delete-release: if: github.event_name != 'schedule' - uses: apecloud/apecloud-cd/.github/workflows/release-delete.yml@v0.1.25 + uses: apecloud/apecloud-cd/.github/workflows/release-delete.yml@v0.1.29 with: VERSION: "${{ inputs.release-version }}" - APECD_REF: "v0.1.25" + APECD_REF: "v0.1.29" secrets: inherit delete-release-schedule: if: github.event_name == 'schedule' 
- uses: apecloud/apecloud-cd/.github/workflows/release-delete-schedule.yml@v0.1.25 + uses: apecloud/apecloud-cd/.github/workflows/release-delete-schedule.yml@v0.1.29 with: - APECD_REF: "v0.1.25" + APECD_REF: "v0.1.29" secrets: inherit From be7353d0d0e7a9f0a5786e41ce75ca89b3d92fad Mon Sep 17 00:00:00 2001 From: Wei Cao Date: Tue, 26 Sep 2023 14:29:24 +0800 Subject: [PATCH 38/58] chore: tidy imports to fix lint errors --- controllers/apps/components/types.go | 3 ++- controllers/apps/configuration/revision.go | 3 ++- controllers/apps/configuration/revision_test.go | 2 +- controllers/apps/operations/pipeline.go | 2 +- controllers/apps/transformer_cluster_credential.go | 2 +- controllers/dataprotection/backup_controller.go | 2 +- internal/controller/configuration/configuration_test.go | 5 +++-- internal/controller/configuration/tool_image_builder.go | 2 +- 8 files changed, 12 insertions(+), 9 deletions(-) diff --git a/controllers/apps/components/types.go b/controllers/apps/components/types.go index 338009fb860..2cb046ad906 100644 --- a/controllers/apps/components/types.go +++ b/controllers/apps/components/types.go @@ -23,12 +23,13 @@ import ( "context" "fmt" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/apecloud/kubeblocks/internal/class" "github.com/apecloud/kubeblocks/internal/constant" types2 "github.com/apecloud/kubeblocks/internal/controller/client" "github.com/apecloud/kubeblocks/internal/controller/graph" "github.com/apecloud/kubeblocks/internal/controller/plan" - "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/controller/component" diff --git a/controllers/apps/configuration/revision.go b/controllers/apps/configuration/revision.go index 7fabec72ad8..3af84e14b70 100644 --- a/controllers/apps/configuration/revision.go +++ b/controllers/apps/configuration/revision.go @@ -24,10 +24,11 @@ import ( "strconv" "strings" + corev1 "k8s.io/api/core/v1" + 
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/constant" - corev1 "k8s.io/api/core/v1" ) type ConfigurationRevision struct { diff --git a/controllers/apps/configuration/revision_test.go b/controllers/apps/configuration/revision_test.go index 49a351da841..428c1b2126d 100644 --- a/controllers/apps/configuration/revision_test.go +++ b/controllers/apps/configuration/revision_test.go @@ -23,11 +23,11 @@ import ( "fmt" "testing" - "github.com/apecloud/kubeblocks/internal/constant" "github.com/stretchr/testify/assert" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" + "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/builder" ) diff --git a/controllers/apps/operations/pipeline.go b/controllers/apps/operations/pipeline.go index f1bd1251e03..c0da1cd6912 100644 --- a/controllers/apps/operations/pipeline.go +++ b/controllers/apps/operations/pipeline.go @@ -20,12 +20,12 @@ along with this program. If not, see . package operations import ( - "github.com/apecloud/kubeblocks/internal/controller/configuration" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/configuration/validate" + "github.com/apecloud/kubeblocks/internal/controller/configuration" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) diff --git a/controllers/apps/transformer_cluster_credential.go b/controllers/apps/transformer_cluster_credential.go index 3194a01bc6c..a4ae3233b7f 100644 --- a/controllers/apps/transformer_cluster_credential.go +++ b/controllers/apps/transformer_cluster_credential.go @@ -20,11 +20,11 @@ along with this program. If not, see . 
package apps import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/apecloud/kubeblocks/internal/controller/component" + "github.com/apecloud/kubeblocks/internal/controller/factory" "github.com/apecloud/kubeblocks/internal/controller/graph" ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index b6284624f6c..26b43b3a1b5 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -31,7 +31,6 @@ import ( "strings" "time" - ctrlbuilder "github.com/apecloud/kubeblocks/internal/controller/factory" snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/leaanthony/debme" @@ -60,6 +59,7 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" + ctrlbuilder "github.com/apecloud/kubeblocks/internal/controller/factory" "github.com/apecloud/kubeblocks/internal/controller/model" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" viper "github.com/apecloud/kubeblocks/internal/viperx" diff --git a/internal/controller/configuration/configuration_test.go b/internal/controller/configuration/configuration_test.go index 62c1c3d5d78..637c4003198 100644 --- a/internal/controller/configuration/configuration_test.go +++ b/internal/controller/configuration/configuration_test.go @@ -21,14 +21,15 @@ package configuration import ( . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/controller/component" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) const clusterDefName = "test-clusterdef" diff --git a/internal/controller/configuration/tool_image_builder.go b/internal/controller/configuration/tool_image_builder.go index 2ab3eed0aef..6a3f046817e 100644 --- a/internal/controller/configuration/tool_image_builder.go +++ b/internal/controller/configuration/tool_image_builder.go @@ -20,13 +20,13 @@ along with this program. If not, see . package configuration import ( - "github.com/apecloud/kubeblocks/internal/controller/factory" corev1 "k8s.io/api/core/v1" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" cfgcm "github.com/apecloud/kubeblocks/internal/configuration/config_manager" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/component" + "github.com/apecloud/kubeblocks/internal/controller/factory" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" viper "github.com/apecloud/kubeblocks/internal/viperx" ) From 37890bd7ff138dff8a37c7b82ef0fc429ef5e765 Mon Sep 17 00:00:00 2001 From: zjx20 Date: Wed, 27 Sep 2023 10:10:31 +0800 Subject: [PATCH 39/58] feat: support direct access to the backup repo (#5263) --- .../v1alpha1/backuprepo_types.go | 30 ++ .../storage/v1alpha1/storageprovider_types.go | 9 + ...aprotection.kubeblocks.io_backuprepos.yaml | 11 + ...torage.kubeblocks.io_storageproviders.yaml | 17 +- .../dataprotection/backup_controller.go | 4 +- .../dataprotection/backuprepo_controller.go | 499 ++++++++++++------ .../backuprepo_controller_test.go | 194 ++++++- 
controllers/dataprotection/type.go | 45 +- ...aprotection.kubeblocks.io_backuprepos.yaml | 11 + ...torage.kubeblocks.io_storageproviders.yaml | 17 +- .../helm/templates/storageprovider/cos.yaml | 10 + .../helm/templates/storageprovider/gcs.yaml | 19 +- .../helm/templates/storageprovider/minio.yaml | 10 + .../helm/templates/storageprovider/obs.yaml | 11 + .../helm/templates/storageprovider/oss.yaml | 10 + deploy/helm/templates/storageprovider/s3.yaml | 11 + go.mod | 2 +- 17 files changed, 679 insertions(+), 231 deletions(-) diff --git a/apis/dataprotection/v1alpha1/backuprepo_types.go b/apis/dataprotection/v1alpha1/backuprepo_types.go index 7266218edd3..f89c6156e35 100644 --- a/apis/dataprotection/v1alpha1/backuprepo_types.go +++ b/apis/dataprotection/v1alpha1/backuprepo_types.go @@ -22,6 +22,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// AccessMethod is an enum type that defines the access method of the backup repo. +type AccessMethod string + +const ( + // AccessMethodMount means that the storage is mounted locally, + // so that remote files can be accessed just like a local file. + AccessMethodMount AccessMethod = "Mount" + // AccessMethodTool means to access the storage with a command-line tool, + // which helps to transfer files between the storage and local. + AccessMethodTool AccessMethod = "Tool" +) + // BackupRepoSpec defines the desired state of BackupRepo type BackupRepoSpec struct { // The storage provider used by this backup repo. @@ -29,6 +41,12 @@ type BackupRepoSpec struct { // +kubebuilder:validation:Required StorageProviderRef string `json:"storageProviderRef"` + // Specifies the access method of the backup repo. + // +kubebuilder:validation:Enum={Mount,Tool} + // +kubebuilder:default=Mount + // +optional + AccessMethod AccessMethod `json:"accessMethod,omitempty"` + // The requested capacity for the PVC created by this backup repo. 
// +optional VolumeCapacity resource.Quantity `json:"volumeCapacity,omitempty"` @@ -74,6 +92,10 @@ type BackupRepoStatus struct { // +optional BackupPVCName string `json:"backupPVCName,omitempty"` + // toolConfigSecretName is the name of the secret containing the configuration for the access tool. + // +optional + ToolConfigSecretName string `json:"toolConfigSecretName,omitempty"` + // isDefault indicates whether this backup repo is the default one. // +optional IsDefault bool `json:"isDefault,omitempty"` @@ -111,3 +133,11 @@ type BackupRepoList struct { func init() { SchemeBuilder.Register(&BackupRepo{}, &BackupRepoList{}) } + +func (repo *BackupRepo) AccessByMount() bool { + return repo.Spec.AccessMethod == "" || repo.Spec.AccessMethod == AccessMethodMount +} + +func (repo *BackupRepo) AccessByTool() bool { + return repo.Spec.AccessMethod == AccessMethodTool +} diff --git a/apis/storage/v1alpha1/storageprovider_types.go b/apis/storage/v1alpha1/storageprovider_types.go index d92721247ad..daa85e6ed4a 100644 --- a/apis/storage/v1alpha1/storageprovider_types.go +++ b/apis/storage/v1alpha1/storageprovider_types.go @@ -47,6 +47,12 @@ type StorageProviderSpec struct { // +optional PersistentVolumeClaimTemplate string `json:"persistentVolumeClaimTemplate,omitempty"` + // A Go template for rendering a config used by the datasafed command. + // The template will be rendered with the following variables: + // - Parameters: a map of parameters defined in the ParametersSchema. + // +optional + DatasafedConfigTemplate string `json:"datasafedConfigTemplate,omitempty"` + // The schema describes the parameters required by this StorageProvider, // when rendering the templates. 
// +optional @@ -85,6 +91,9 @@ type StorageProviderStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:categories={kubeblocks},scope=Cluster +// +kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="CSIDRIVER",type="string",JSONPath=".spec.csiDriverName" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // StorageProvider is the Schema for the storageproviders API // StorageProvider describes how to provision PVCs for a specific storage system (e.g. S3, NFS, etc), diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml index 4cb9aea608e..8d61dbb411b 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml @@ -51,6 +51,13 @@ spec: spec: description: BackupRepoSpec defines the desired state of BackupRepo properties: + accessMethod: + default: Mount + description: Specifies the access method of the backup repo. + enum: + - Mount + - Tool + type: string config: additionalProperties: type: string @@ -211,6 +218,10 @@ spec: description: Backup repo reconciliation phases. Valid values are PreChecking, Failed, Ready, Deleting. type: string + toolConfigSecretName: + description: toolConfigSecretName is the name of the secret containing + the configuration for the access tool. 
+ type: string type: object type: object served: true diff --git a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml index 4cdbf5e72d9..e53ece23fab 100644 --- a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml +++ b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml @@ -18,7 +18,17 @@ spec: singular: storageprovider scope: Cluster versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .spec.csiDriverName + name: CSIDRIVER + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 schema: openAPIV3Schema: description: StorageProvider is the Schema for the storageproviders API StorageProvider @@ -48,6 +58,11 @@ spec: by the CSI driver. The template will be rendered with the following variables: - Parameters: a map of parameters defined in the ParametersSchema.' type: string + datasafedConfigTemplate: + description: 'A Go template for rendering a config used by the datasafed + command. The template will be rendered with the following variables: + - Parameters: a map of parameters defined in the ParametersSchema.' + type: string parametersSchema: description: The schema describes the parameters required by this StorageProvider, when rendering the templates. diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index 26b43b3a1b5..e64083fe855 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -538,8 +538,8 @@ func (r *BackupReconciler) handlePVCByBackupRepo(reqCtx intctrlutil.RequestCtx, // add a special label and wait for the backup repo controller to create the PVC. // we need to update the object meta immediately, because we are going to break the current reconciliation. 
_, err = r.patchBackupObjectLabels(reqCtx, backup, map[string]string{ - dataProtectionBackupRepoKey: repo.Name, - dataProtectionNeedRepoPVCKey: trueVal, + dataProtectionBackupRepoKey: repo.Name, + dataProtectionWaitRepoPreparationKey: trueVal, }) if err != nil { return "", "", err diff --git a/controllers/dataprotection/backuprepo_controller.go b/controllers/dataprotection/backuprepo_controller.go index a80971ccc39..3aef46b42bf 100644 --- a/controllers/dataprotection/backuprepo_controller.go +++ b/controllers/dataprotection/backuprepo_controller.go @@ -24,13 +24,15 @@ import ( "context" "crypto/md5" "encoding/hex" + "errors" "fmt" "reflect" "sort" "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + "github.com/Masterminds/sprig/v3" + "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,12 +48,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/generics" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -101,7 +105,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) // get repo object repo := &dpv1alpha1.BackupRepo{} if err := r.Get(ctx, req.NamespacedName, repo); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to get BackupRepo") + return checkedRequeueWithError(err, reqCtx.Log, "failed to get BackupRepo") } // handle finalizer @@ -125,22 +129,14 @@ func (r *BackupRepoReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) provider, err := r.checkStorageProvider(reqCtx, repo) if err != nil { _ = r.updateStatus(reqCtx, repo) - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "check storage provider status failed") - } - if !meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageProviderReady) { - // update status phase to failed - if err := r.updateStatus(reqCtx, repo); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "update status phase failed") - } - // will reconcile again after the storage provider becomes ready - return intctrlutil.Reconciled() + return checkedRequeueWithError(err, reqCtx.Log, "check storage provider status failed") } // check parameters for rendering templates parameters, err := r.checkParameters(reqCtx, repo) if err != nil { _ = r.updateStatus(reqCtx, repo) - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "check parameters failed") + return checkedRequeueWithError(err, reqCtx.Log, "check parameters failed") } renderCtx := renderContext{ @@ -151,17 +147,26 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) err = r.createStorageClassAndSecret(reqCtx, renderCtx, repo, provider) if err != nil { _ = r.updateStatus(reqCtx, repo) - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, + return checkedRequeueWithError(err, reqCtx.Log, "failed to create storage class and secret") } + // check PVC template err = r.checkPVCTemplate(reqCtx, renderCtx, repo, provider) if err != nil { _ = r.updateStatus(reqCtx, repo) - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, + return checkedRequeueWithError(err, reqCtx.Log, "failed to check PVC template") } + // check tool config + err = r.checkAndUpdateToolConfig(reqCtx, renderCtx, repo, provider) + if err != nil { + _ = r.updateStatus(reqCtx, repo) + return checkedRequeueWithError(err, reqCtx.Log, + "failed to check tool config") + } + // TODO: implement pre-check 
logic // 1. try to create a PVC and observe its status // 2. create a pre-check job, mount with the PVC and check job status @@ -169,14 +174,14 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) // update status phase to ready if all conditions are met if err = r.updateStatus(reqCtx, repo); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, + return checkedRequeueWithError(err, reqCtx.Log, "failed to update BackupRepo status") } // check associated backups, to create PVC in their namespaces if repo.Status.Phase == dpv1alpha1.BackupRepoReady { - if err = r.createPVCForAssociatedBackups(reqCtx, renderCtx, repo, provider); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, + if err = r.prepareForAssociatedBackups(reqCtx, renderCtx, repo, provider); err != nil { + return checkedRequeueWithError(err, reqCtx.Log, "check associated backups failed") } } @@ -192,13 +197,25 @@ func (r *BackupRepoReconciler) updateStatus(reqCtx intctrlutil.RequestCtx, repo if meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageProviderReady) && meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeParametersChecked) && meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageClassCreated) && - meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypePVCTemplateChecked) { + meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypePVCTemplateChecked) && + meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeToolConfigChecked) { phase = dpv1alpha1.BackupRepoReady } repo.Status.Phase = phase } repo.Status.IsDefault = repo.Annotations[constant.DefaultBackupRepoAnnotationKey] == trueVal + // update other fields + if repo.Status.BackupPVCName == "" { + repo.Status.BackupPVCName = randomNameForDerivedObject(repo, "pvc") + } + if repo.Status.ToolConfigSecretName == "" { + repo.Status.ToolConfigSecretName = randomNameForDerivedObject(repo, "tool-config") + 
} + if repo.Status.ObservedGeneration != repo.Generation { + repo.Status.ObservedGeneration = repo.Generation + } + if !reflect.DeepEqual(old.Status, repo.Status) { if err := r.Client.Status().Patch(reqCtx.Ctx, repo, client.MergeFrom(old)); err != nil { return fmt.Errorf("updateStatus failed: %w", err) @@ -207,18 +224,25 @@ func (r *BackupRepoReconciler) updateStatus(reqCtx intctrlutil.RequestCtx, repo return nil } +func (r *BackupRepoReconciler) updateConditionInDefer(reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo, + condType string, reason string, err *error) { + status := metav1.ConditionTrue + message := "" + if *err != nil { + status = metav1.ConditionFalse + message = (*err).Error() + } + updateErr := updateCondition(reqCtx.Ctx, r.Client, repo, condType, status, reason, message) + if *err == nil { + *err = updateErr + } +} + func (r *BackupRepoReconciler) checkStorageProvider( reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) (provider *storagev1alpha1.StorageProvider, err error) { - var condType = ConditionTypeStorageProviderReady - var status metav1.ConditionStatus - var reason string - var message string - - // call updateCondition() when exiting the function. 
+ reason := ReasonUnknownError defer func() { - if status != "" { - err = updateCondition(reqCtx.Ctx, r.Client, repo, condType, status, reason, message) - } + r.updateConditionInDefer(reqCtx, repo, ConditionTypeStorageProviderReady, reason, &err) }() // get storage provider object @@ -227,78 +251,67 @@ func (r *BackupRepoReconciler) checkStorageProvider( err = r.Client.Get(reqCtx.Ctx, providerKey, provider) if err != nil { if apierrors.IsNotFound(err) { - status = metav1.ConditionFalse reason = ReasonStorageProviderNotFound - } else { - status = metav1.ConditionUnknown - reason = ReasonUnknownError - message = err.Error() } return nil, err } // check its spec - if provider.Spec.StorageClassTemplate == "" && - provider.Spec.PersistentVolumeClaimTemplate == "" { - // both StorageClassTemplate and PersistentVolumeClaimTemplate are empty. - // in this case, we are unable to create a backup PVC. - status = metav1.ConditionFalse - reason = ReasonInvalidStorageProvider - message = "both StorageClassTemplate and PersistentVolumeClaimTemplate are empty" - return provider, nil + switch { + case repo.AccessByMount(): + if provider.Spec.StorageClassTemplate == "" && + provider.Spec.PersistentVolumeClaimTemplate == "" { + // both StorageClassTemplate and PersistentVolumeClaimTemplate are empty. + // in this case, we are unable to create a backup PVC. 
+ reason = ReasonInvalidStorageProvider + return provider, newDependencyError("both StorageClassTemplate and PersistentVolumeClaimTemplate are empty") + } + case repo.AccessByTool(): + if provider.Spec.DatasafedConfigTemplate == "" { + reason = ReasonInvalidStorageProvider + return provider, newDependencyError("DatasafedConfigTemplate is empty") + } } // check its status if provider.Status.Phase == storagev1alpha1.StorageProviderReady { - status = metav1.ConditionTrue reason = ReasonStorageProviderReady + return provider, nil } else { - status = metav1.ConditionFalse reason = ReasonStorageProviderNotReady - message = fmt.Sprintf("storage provider %s is not ready, status: %s", - provider.Name, provider.Status.Phase) + err = newDependencyError(fmt.Sprintf("storage provider %s is not ready, status: %s", + provider.Name, provider.Status.Phase)) + return provider, err } - return provider, nil } func (r *BackupRepoReconciler) checkParameters(reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) (parameters map[string]string, err error) { - condType := ConditionTypeParametersChecked - var status metav1.ConditionStatus - var reason string - var message string - + reason := ReasonUnknownError defer func() { - updateErr := updateCondition(reqCtx.Ctx, r.Client, repo, - condType, status, reason, message) - if err == nil { - err = updateErr - } + r.updateConditionInDefer(reqCtx, repo, ConditionTypeParametersChecked, reason, &err) }() // collect parameters for rendering templates parameters, err = r.collectParameters(reqCtx, repo) if err != nil { if apierrors.IsNotFound(err) { - status = metav1.ConditionFalse reason = ReasonCredentialSecretNotFound - message = err.Error() - return nil, err - } else { - status = metav1.ConditionUnknown - reason = ReasonUnknownError - message = err.Error() } return nil, err } // TODO: verify parameters - status = metav1.ConditionTrue reason = ReasonParametersChecked return parameters, nil } func (r *BackupRepoReconciler) 
createStorageClassAndSecret(reqCtx intctrlutil.RequestCtx, - renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { + renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) (err error) { + + reason := ReasonUnknownError + defer func() { + r.updateConditionInDefer(reqCtx, repo, ConditionTypeStorageClassCreated, reason, &err) + }() oldRepo := repo.DeepCopy() @@ -312,8 +325,9 @@ func (r *BackupRepoReconciler) createStorageClassAndSecret(reqCtx intctrlutil.Re } } renderCtx.CSIDriverSecretRef = *repo.Status.GeneratedCSIDriverSecret - // create secret if it's not exist - if _, err := r.createSecretForCSIDriver(reqCtx, renderCtx, repo, provider); err != nil { + // create or update the secret for CSI + if _, err = r.createOrUpdateSecretForCSIDriver(reqCtx, renderCtx, repo, provider); err != nil { + reason = ReasonPrepareCSISecretFailed return err } } @@ -323,18 +337,12 @@ func (r *BackupRepoReconciler) createStorageClassAndSecret(reqCtx intctrlutil.Re if repo.Status.GeneratedStorageClassName == "" { repo.Status.GeneratedStorageClassName = randomNameForDerivedObject(repo, "sc") } - if _, err := r.createStorageClass(reqCtx, renderCtx, repo, provider); err != nil { + if _, err = r.createStorageClass(reqCtx, renderCtx, repo, provider); err != nil { + reason = ReasonPrepareStorageClassFailed return err } } - // update other fields - if repo.Status.BackupPVCName == "" { - repo.Status.BackupPVCName = randomNameForDerivedObject(repo, "pvc") - } - if repo.Status.ObservedGeneration != repo.Generation { - repo.Status.ObservedGeneration = repo.Generation - } if !meta.IsStatusConditionTrue(repo.Status.Conditions, ConditionTypeStorageClassCreated) { setCondition(repo, ConditionTypeStorageClassCreated, metav1.ConditionTrue, ReasonStorageClassCreated, "") @@ -346,28 +354,34 @@ func (r *BackupRepoReconciler) createStorageClassAndSecret(reqCtx intctrlutil.Re return fmt.Errorf("failed to patch backup 
repo: %w", err) } } + reason = ReasonStorageClassCreated return nil } -func (r *BackupRepoReconciler) createSecretForCSIDriver( +func (r *BackupRepoReconciler) createOrUpdateSecretForCSIDriver( reqCtx intctrlutil.RequestCtx, renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) (created bool, err error) { - secretTemplateMD5 := md5Digest(provider.Spec.CSIDriverSecretTemplate) - templateValuesMD5 := md5Digest(stableSerializeMap(renderCtx.Parameters)) - condType := ConditionTypeStorageClassCreated - setSecretContent := func(secret *corev1.Secret) error { + secret := &corev1.Secret{} + secret.Name = repo.Status.GeneratedCSIDriverSecret.Name + secret.Namespace = repo.Status.GeneratedCSIDriverSecret.Namespace + + templateMd5 := md5Digest(provider.Spec.CSIDriverSecretTemplate) + parametersMd5 := renderCtx.Md5OfParameters() + shouldUpdateFunc := func() bool { + tmplMd5InSecret := secret.Annotations[dataProtectionSecretTemplateMD5AnnotationKey] + paramMd5InSecret := secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] + return templateMd5 != tmplMd5InSecret || parametersMd5 != paramMd5InSecret + } + + return createOrUpdateObject(reqCtx.Ctx, r.Client, secret, func() error { // render secret template content, err := renderTemplate("secret", provider.Spec.CSIDriverSecretTemplate, renderCtx) if err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionFalse, ReasonBadSecretTemplate, err.Error()) return fmt.Errorf("failed to render secret template: %w", err) } secretStringData := map[string]string{} if err = yaml.Unmarshal([]byte(content), &secretStringData); err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionFalse, ReasonBadSecretTemplate, err.Error()) return fmt.Errorf("failed to unmarshal secret content: %w", err) } secretData := make(map[string][]byte, len(secretStringData)) @@ -375,63 +389,24 @@ func (r *BackupRepoReconciler) 
createSecretForCSIDriver( secretData[k] = []byte(v) } secret.Data = secretData - return nil - } - - secret := &corev1.Secret{} - secret.Name = repo.Status.GeneratedCSIDriverSecret.Name - secret.Namespace = repo.Status.GeneratedCSIDriverSecret.Namespace - - // create the secret object if not exist. - // this function will retrieve the whole secret object - // when the object is existing. - created, err = createObjectIfNotExist(reqCtx.Ctx, r.Client, secret, - func() error { - secret.Labels = map[string]string{ - dataProtectionBackupRepoKey: repo.Name, - } - secret.Annotations = map[string]string{ - dataProtectionSecretTemplateMD5AnnotationKey: secretTemplateMD5, - dataProtectionTemplateValuesMD5AnnotationKey: templateValuesMD5, - } - if err := setSecretContent(secret); err != nil { - return err - } - if err := controllerutil.SetControllerReference(repo, secret, r.Scheme); err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionUnknown, ReasonUnknownError, err.Error()) - return fmt.Errorf("failed to set controller reference: %w", err) - } - return nil - }) - if err != nil { - return false, fmt.Errorf("createObjectIfNotExist for secret %s failed: %w", - client.ObjectKeyFromObject(secret), err) - } - if created { - return true, nil - } - // check if the template or config changed, then update the secret - currSecretTemplateMD5 := secret.Annotations[dataProtectionSecretTemplateMD5AnnotationKey] - currTemplateValuesMD5 := secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] - if currSecretTemplateMD5 != secretTemplateMD5 || currTemplateValuesMD5 != templateValuesMD5 { - patch := client.MergeFrom(secret.DeepCopy()) - if err := setSecretContent(secret); err != nil { - return false, err + // set labels and annotations + if secret.Labels == nil { + secret.Labels = make(map[string]string) } + secret.Labels[dataProtectionBackupRepoKey] = repo.Name + if secret.Annotations == nil { secret.Annotations = make(map[string]string) } - 
secret.Annotations[dataProtectionSecretTemplateMD5AnnotationKey] = secretTemplateMD5 - secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] = templateValuesMD5 - err := r.Client.Patch(reqCtx.Ctx, secret, patch) - if err != nil { - return false, fmt.Errorf("failed to patch secret object %s: %w", - client.ObjectKeyFromObject(secret), err) + secret.Annotations[dataProtectionSecretTemplateMD5AnnotationKey] = templateMd5 + secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] = parametersMd5 + + if err := controllerutil.SetControllerReference(repo, secret, r.Scheme); err != nil { + return fmt.Errorf("failed to set controller reference: %w", err) } - } - return false, nil + return nil + }, shouldUpdateFunc) } func (r *BackupRepoReconciler) createStorageClass( @@ -442,18 +417,12 @@ func (r *BackupRepoReconciler) createStorageClass( storageClass.Name = repo.Status.GeneratedStorageClassName return createObjectIfNotExist(reqCtx.Ctx, r.Client, storageClass, func() error { - condType := ConditionTypeStorageClassCreated - // render storage class template content, err := renderTemplate("sc", provider.Spec.StorageClassTemplate, renderCtx) if err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionFalse, ReasonBadStorageClassTemplate, err.Error()) return fmt.Errorf("failed to render storage class template: %w", err) } if err = yaml.Unmarshal([]byte(content), storageClass); err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionFalse, ReasonBadStorageClassTemplate, err.Error()) return fmt.Errorf("failed to unmarshal storage class: %w", err) } @@ -467,8 +436,6 @@ func (r *BackupRepoReconciler) createStorageClass( storageClass.ReclaimPolicy = &repo.Spec.PVReclaimPolicy } if err := controllerutil.SetControllerReference(repo, storageClass, r.Scheme); err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionUnknown, ReasonUnknownError, err.Error()) return 
fmt.Errorf("failed to set owner reference: %w", err) } return nil @@ -476,27 +443,98 @@ func (r *BackupRepoReconciler) createStorageClass( } func (r *BackupRepoReconciler) checkPVCTemplate(reqCtx intctrlutil.RequestCtx, - renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { + renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) (err error) { + + reason := ReasonUnknownError + defer func() { + r.updateConditionInDefer(reqCtx, repo, ConditionTypePVCTemplateChecked, reason, &err) + }() - condType := ConditionTypePVCTemplateChecked + if !repo.AccessByMount() || provider.Spec.PersistentVolumeClaimTemplate == "" { + return nil + } checkedTemplateMd5 := repo.Annotations[dataProtectionPVCTemplateMD5MD5AnnotationKey] + checkedParametersMd5 := repo.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] currentTemplateMd5 := md5Digest(provider.Spec.PersistentVolumeClaimTemplate) - if provider.Spec.PersistentVolumeClaimTemplate != "" && checkedTemplateMd5 != currentTemplateMd5 { + currentParametersMd5 := renderCtx.Md5OfParameters() + if checkedTemplateMd5 != currentTemplateMd5 || checkedParametersMd5 != currentParametersMd5 { pvc := &corev1.PersistentVolumeClaim{} err := r.constructPVCByTemplate(renderCtx, pvc, repo, provider.Spec.PersistentVolumeClaimTemplate) if err != nil { - _ = updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionFalse, ReasonBadPVCTemplate, err.Error()) + reason = ReasonBadPVCTemplate return err } } - if err := updateCondition(reqCtx.Ctx, r.Client, repo, condType, - metav1.ConditionTrue, ReasonPVCTemplateChecked, ""); err != nil { + if err = updateAnnotations(reqCtx.Ctx, r.Client, repo, map[string]string{ + dataProtectionPVCTemplateMD5MD5AnnotationKey: currentTemplateMd5, + dataProtectionTemplateValuesMD5AnnotationKey: currentParametersMd5, + }); err != nil { return err } - return updateAnnotations(reqCtx.Ctx, r.Client, repo, 
map[string]string{ - dataProtectionPVCTemplateMD5MD5AnnotationKey: currentTemplateMd5, + reason = ReasonPVCTemplateChecked + return nil +} + +func (r *BackupRepoReconciler) checkAndUpdateToolConfig(reqCtx intctrlutil.RequestCtx, + renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) (err error) { + + reason := ReasonUnknownError + defer func() { + r.updateConditionInDefer(reqCtx, repo, ConditionTypeToolConfigChecked, reason, &err) + }() + + if !repo.AccessByTool() { + return nil + } + checkedTemplateMd5 := repo.Annotations[dataProtectionToolConfigTemplateMD5MD5AnnotationKey] + checkedParametersMd5 := repo.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] + currentTemplateMd5 := md5Digest(provider.Spec.DatasafedConfigTemplate) + currentParametersMd5 := renderCtx.Md5OfParameters() + if !(checkedTemplateMd5 != currentTemplateMd5 || checkedParametersMd5 != currentParametersMd5) { + return nil + } + // check tool config template + content, err := renderTemplate("tool-config", provider.Spec.DatasafedConfigTemplate, renderCtx) + if err != nil { + reason = ReasonBadToolConfigTemplate + return err + } + // update existing tool config secrets + secretList := &corev1.SecretList{} + err = r.Client.List(reqCtx.Ctx, secretList, client.MatchingLabels{ + dataProtectionBackupRepoKey: repo.Name, + dataProtectionIsToolConfigKey: trueVal, }) + if err != nil { + return err + } + for idx := range secretList.Items { + secret := &secretList.Items[idx] + tmplMd5InSecret := secret.Annotations[dataProtectionToolConfigTemplateMD5MD5AnnotationKey] + paramMd5InSecret := secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] + if tmplMd5InSecret == currentTemplateMd5 && paramMd5InSecret == currentParametersMd5 { + continue + } + patch := client.MergeFrom(secret.DeepCopy()) + constructToolConfigSecret(secret, content) + if secret.Annotations == nil { + secret.Annotations = make(map[string]string) + } + 
secret.Annotations[dataProtectionToolConfigTemplateMD5MD5AnnotationKey] = currentTemplateMd5 + secret.Annotations[dataProtectionTemplateValuesMD5AnnotationKey] = currentParametersMd5 + if err = r.Client.Patch(reqCtx.Ctx, secret, patch); err != nil { + return err + } + } + + if err = updateAnnotations(reqCtx.Ctx, r.Client, repo, map[string]string{ + dataProtectionToolConfigTemplateMD5MD5AnnotationKey: currentTemplateMd5, + dataProtectionTemplateValuesMD5AnnotationKey: currentParametersMd5, + }); err != nil { + return err + } + reason = ReasonToolConfigChecked + return nil } func (r *BackupRepoReconciler) constructPVCByTemplate( @@ -537,12 +575,12 @@ func (r *BackupRepoReconciler) listAssociatedBackups( return filtered, err } -func (r *BackupRepoReconciler) createPVCForAssociatedBackups( +func (r *BackupRepoReconciler) prepareForAssociatedBackups( reqCtx intctrlutil.RequestCtx, renderCtx renderContext, repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider) error { backups, err := r.listAssociatedBackups(reqCtx, repo, map[string]string{ - dataProtectionNeedRepoPVCKey: trueVal, + dataProtectionWaitRepoPreparationKey: trueVal, }) if err != nil { return err @@ -550,14 +588,26 @@ func (r *BackupRepoReconciler) createPVCForAssociatedBackups( // return any error to reconcile the repo var retErr error for _, backup := range backups { - if err := r.checkOrCreatePVC(reqCtx, renderCtx, repo, provider, backup.Namespace); err != nil { - reqCtx.Log.Error(err, "failed to check or create PVC", "namespace", backup.Namespace) - retErr = err - continue + switch { + case repo.AccessByMount(): + if err := r.checkOrCreatePVC(reqCtx, renderCtx, repo, provider, backup.Namespace); err != nil { + reqCtx.Log.Error(err, "failed to check or create PVC", "namespace", backup.Namespace) + retErr = err + continue + } + case repo.AccessByTool(): + if err := r.checkOrCreateToolConfigSecret(reqCtx, renderCtx, repo, provider, backup.Namespace); err != nil { + reqCtx.Log.Error(err, 
"failed to check or create tool config secret", "namespace", backup.Namespace) + retErr = err + continue + } + default: + retErr = fmt.Errorf("unknown access method: %s", repo.Spec.AccessMethod) } - if backup.Labels[dataProtectionNeedRepoPVCKey] != "" { + + if backup.Labels[dataProtectionWaitRepoPreparationKey] != "" { patch := client.MergeFrom(backup.DeepCopy()) - delete(backup.Labels, dataProtectionNeedRepoPVCKey) + delete(backup.Labels, dataProtectionWaitRepoPreparationKey) if err = r.Client.Patch(reqCtx.Ctx, backup, patch); err != nil { reqCtx.Log.Error(err, "failed to patch backup", "backup", client.ObjectKeyFromObject(backup)) @@ -574,6 +624,8 @@ func (r *BackupRepoReconciler) checkOrCreatePVC( repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider, namespace string) error { pvc := &corev1.PersistentVolumeClaim{} + pvc.Name = repo.Status.BackupPVCName + pvc.Namespace = namespace _, err := createObjectIfNotExist(reqCtx.Ctx, r.Client, pvc, func() error { if provider.Spec.PersistentVolumeClaimTemplate != "" { @@ -582,6 +634,9 @@ func (r *BackupRepoReconciler) checkOrCreatePVC( if err != nil { return err } + // overwrite PVC name and namespace + pvc.Name = repo.Status.BackupPVCName + pvc.Namespace = namespace } else { // set storage class name to PVC, other fields will be set with default value later storageClassName := repo.Status.GeneratedStorageClassName @@ -589,9 +644,6 @@ func (r *BackupRepoReconciler) checkOrCreatePVC( StorageClassName: &storageClassName, } } - // overwrite PVC name and namespace - pvc.Name = repo.Status.BackupPVCName - pvc.Namespace = namespace // add a referencing label if pvc.Labels == nil { pvc.Labels = make(map[string]string) @@ -621,6 +673,45 @@ func (r *BackupRepoReconciler) checkOrCreatePVC( return err } +func constructToolConfigSecret(secret *corev1.Secret, content string) { + secret.Data = map[string][]byte{ + "datasafed.conf": []byte(content), + } +} + +func (r *BackupRepoReconciler) checkOrCreateToolConfigSecret( 
+ reqCtx intctrlutil.RequestCtx, renderCtx renderContext, + repo *dpv1alpha1.BackupRepo, provider *storagev1alpha1.StorageProvider, namespace string) error { + + secret := &corev1.Secret{} + secret.Name = repo.Status.ToolConfigSecretName + secret.Namespace = namespace + _, err := createObjectIfNotExist(reqCtx.Ctx, r.Client, secret, + func() error { + content, err := renderTemplate("tool-config", provider.Spec.DatasafedConfigTemplate, renderCtx) + if err != nil { + return fmt.Errorf("failed to render tool config template: %w", err) + } + constructToolConfigSecret(secret, content) + + // add a referencing label + secret.Labels = map[string]string{ + dataProtectionBackupRepoKey: repo.Name, + dataProtectionIsToolConfigKey: trueVal, + } + secret.Annotations = map[string]string{ + dataProtectionTemplateValuesMD5AnnotationKey: renderCtx.Md5OfParameters(), + dataProtectionToolConfigTemplateMD5MD5AnnotationKey: md5Digest(provider.Spec.DatasafedConfigTemplate), + } + if err := controllerutil.SetControllerReference(repo, secret, r.Scheme); err != nil { + return fmt.Errorf("failed to set owner reference: %w", err) + } + return nil + }) + + return err +} + func (r *BackupRepoReconciler) collectParameters( reqCtx intctrlutil.RequestCtx, repo *dpv1alpha1.BackupRepo) (map[string]string, error) { values := make(map[string]string) @@ -682,7 +773,7 @@ func (r *BackupRepoReconciler) deleteExternalResources( return err } - // delete derived secrets + // delete derived secrets (secret for CSI and tool configs) if err := r.deleteSecrets(reqCtx, repo); err != nil { return err } @@ -787,9 +878,9 @@ func (r *BackupRepoReconciler) mapBackupToRepo(obj client.Object) []ctrl.Request return nil } // we should reconcile the BackupRepo when: - // 1. the Backup needs a PVC which is not present and should be created by the BackupRepo. + // 1. the Backup needs to use the BackupRepo, but it's not ready for the namespace. // 2. 
the Backup is being deleted, because it may block the deletion of the BackupRepo. - shouldReconcileRepo := backup.Labels[dataProtectionNeedRepoPVCKey] == trueVal || + shouldReconcileRepo := backup.Labels[dataProtectionWaitRepoPreparationKey] == trueVal || !backup.DeletionTimestamp.IsZero() if shouldReconcileRepo { return []ctrl.Request{{ @@ -841,10 +932,45 @@ func (r *BackupRepoReconciler) SetupWithManager(mgr ctrl.Manager) error { // helper functions // ============================================================================ +// dependencyError indicates that the error itself cannot be resolved +// unless the dependent object is updated. +type dependencyError struct { + msg string +} + +func (e *dependencyError) Error() string { + return e.msg +} + +func newDependencyError(msg string) error { + return &dependencyError{msg: msg} +} + +func isDependencyError(err error) bool { + de, ok := err.(*dependencyError) + return ok || errors.As(err, &de) +} + +func checkedRequeueWithError(err error, logger logr.Logger, msg string, keysAndValues ...interface{}) (reconcile.Result, error) { + if apierrors.IsNotFound(err) || isDependencyError(err) { + return intctrlutil.Reconciled() + } + return intctrlutil.RequeueWithError(err, logger, msg, keysAndValues...) 
+} + type renderContext struct { Parameters map[string]string CSIDriverSecretRef corev1.SecretReference GeneratedStorageClassName string + + md5OfParameters string +} + +func (r *renderContext) Md5OfParameters() string { + if r.md5OfParameters == "" { + r.md5OfParameters = md5Digest(stableSerializeMap(r.Parameters)) + } + return r.md5OfParameters } func renderTemplate(name, tpl string, rCtx renderContext) (string, error) { @@ -858,19 +984,24 @@ func renderTemplate(name, tpl string, rCtx renderContext) (string, error) { return b.String(), err } -func createObjectIfNotExist( +func createOrUpdateObject[T any, PT generics.PObject[T]]( ctx context.Context, c client.Client, - obj client.Object, - mutateFunc func() error) (created bool, err error) { + obj PT, + mutateFunc func() error, + shouldUpdate func() bool) (created bool, err error) { key := client.ObjectKeyFromObject(obj) err = c.Get(ctx, key, obj) if err != nil && !apierrors.IsNotFound(err) { - return false, fmt.Errorf("failed to check existence of object: %w", err) + return false, fmt.Errorf("failed to check existence of object %s: %w", key, err) } + var patch client.Patch if err == nil { - // already exists - return false, nil + // object already exists, check if it needs to be updated + if !shouldUpdate() { + return false, nil + } + patch = client.MergeFrom(PT(obj.DeepCopy())) } if mutateFunc != nil { err := mutateFunc() @@ -878,12 +1009,28 @@ func createObjectIfNotExist( return false, err } } - err = c.Create(ctx, obj) - if err != nil { - return false, fmt.Errorf("failed to create object %s: %w", - client.ObjectKeyFromObject(obj), err) + if patch != nil { + err = c.Patch(ctx, obj, patch) + if err != nil { + err = fmt.Errorf("failed to patch object %s: %w", key, err) + } + return false, err + } else { + err = c.Create(ctx, obj) + if err != nil { + return false, fmt.Errorf("failed to create object %s: %w", key, err) + } + return true, nil } - return true, nil +} + +func createObjectIfNotExist[T any, PT 
generics.PObject[T]]( + ctx context.Context, + c client.Client, + obj PT, + mutateFunc func() error) (created bool, err error) { + noUpdate := func() bool { return false } + return createOrUpdateObject(ctx, c, obj, mutateFunc, noUpdate) } func setCondition( diff --git a/controllers/dataprotection/backuprepo_controller_test.go b/controllers/dataprotection/backuprepo_controller_test.go index ff1bc69175d..ae6f95c9205 100644 --- a/controllers/dataprotection/backuprepo_controller_test.go +++ b/controllers/dataprotection/backuprepo_controller_test.go @@ -163,7 +163,7 @@ var _ = Describe("BackupRepo controller", func() { repoKey = client.ObjectKeyFromObject(repo) } - createStorageProviderSpec := func(mutateFunc func(repo *storagev1alpha1.StorageProvider)) { + createStorageProviderSpec := func(mutateFunc func(provider *storagev1alpha1.StorageProvider)) { obj := &storagev1alpha1.StorageProvider{} obj.GenerateName = "storageprovider-" obj.Spec.CSIDriverName = defaultCSIDriverName @@ -207,15 +207,34 @@ parameters: obj.GenerateName = "backup-" obj.Namespace = testCtx.DefaultNamespace obj.Labels = map[string]string{ - dataProtectionBackupRepoKey: repoKey.Name, - dataProtectionNeedRepoPVCKey: "true", + dataProtectionBackupRepoKey: repoKey.Name, + dataProtectionWaitRepoPreparationKey: trueVal, } obj.Spec.BackupType = dpv1alpha1.BackupTypeSnapshot obj.Spec.BackupPolicyName = "default" if mutateFunc != nil { mutateFunc(obj) } - return testapps.CreateK8sResource(&testCtx, obj).(*dpv1alpha1.Backup) + backup := testapps.CreateK8sResource(&testCtx, obj).(*dpv1alpha1.Backup) + // updating the status of the Backup to COMPLETED, backup repo controller only + // handles for non-failed backups. 
+ Eventually(func(g Gomega) { + obj := &dpv1alpha1.Backup{} + err := testCtx.Cli.Get(testCtx.Ctx, client.ObjectKeyFromObject(backup), obj) + g.Expect(err).ShouldNot(HaveOccurred()) + if obj.Status.Phase == dpv1alpha1.BackupFailed { + // the controller will set the status to failed because + // essential objects (e.g. backup policy) are missed. + // we set the status to completed after that, to avoid conflict. + obj.Status.Phase = dpv1alpha1.BackupCompleted + err = testCtx.Cli.Status().Update(testCtx.Ctx, obj) + g.Expect(err).ShouldNot(HaveOccurred()) + } else { + // check again + g.Expect(false).Should(BeTrue()) + } + }).Should(Succeed()) + return backup } getBackupRepo := func(g Gomega, key types.NamespacedName) *dpv1alpha1.BackupRepo { @@ -450,7 +469,7 @@ parameters: cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageClassCreated) g.Expect(cond).NotTo(BeNil()) g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) - g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadSecretTemplate)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonPrepareCSISecretFailed)) g.Expect(cond.Message).Should(ContainSubstring(`function "bad" not defined`)) })).Should(Succeed()) }) @@ -466,7 +485,7 @@ parameters: cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageClassCreated) g.Expect(cond).NotTo(BeNil()) g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) - g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadSecretTemplate)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonPrepareCSISecretFailed)) g.Expect(cond.Message).Should(ContainSubstring(`cannot unmarshal string into Go value of type map[string]string`)) })).Should(Succeed()) }) @@ -484,7 +503,7 @@ parameters: cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageClassCreated) g.Expect(cond).NotTo(BeNil()) g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) - 
g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadStorageClassTemplate)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonPrepareStorageClassFailed)) g.Expect(cond.Message).Should(ContainSubstring(`function "bad" not defined`)) })).Should(Succeed()) }) @@ -502,7 +521,7 @@ parameters: cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageClassCreated) g.Expect(cond).NotTo(BeNil()) g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) - g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadStorageClassTemplate)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonPrepareStorageClassFailed)) g.Expect(cond.Message).Should(ContainSubstring(`cannot unmarshal string into Go value of type v1.StorageClass`)) })).Should(Succeed()) }) @@ -518,23 +537,6 @@ parameters: backup = createBackupSpec(func(backup *dpv1alpha1.Backup) { backup.Namespace = namespace }) - By("updating the status of the Backup to completed") - Eventually(func(g Gomega) { - obj := &dpv1alpha1.Backup{} - err := testCtx.Cli.Get(testCtx.Ctx, client.ObjectKeyFromObject(backup), obj) - g.Expect(err).ShouldNot(HaveOccurred()) - if obj.Status.Phase == dpv1alpha1.BackupFailed { - // the controller will set the status to failed because - // essential objects (e.g. backup policy) are missed. - // we set the status to completed after that, to avoid conflict. 
- obj.Status.Phase = dpv1alpha1.BackupCompleted - err = testCtx.Cli.Status().Update(testCtx.Ctx, obj) - g.Expect(err).ShouldNot(HaveOccurred()) - } else { - // check again - g.Expect(false).Should(BeTrue()) - } - }).Should(Succeed()) By("checking the PVC has been created in the namespace") pvcKey := types.NamespacedName{ Name: pvcName, @@ -557,7 +559,7 @@ parameters: Context("storage provider with PersistentVolumeClaimTemplate", func() { It("should create a PVC in Backup's namespace (in default namespace)", func() { By("setting the PersistentVolumeClaimTemplate") - Eventually(testapps.GetAndChangeObj(&testCtx, providerKey, func(provider *storagev1alpha1.StorageProvider) { + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { provider.Spec.PersistentVolumeClaimTemplate = ` apiVersion: v1 kind: PersistentVolumeClaim @@ -571,7 +573,8 @@ spec: resources: volumeMode: Filesystem ` - })).Should(Succeed()) + }) + createBackupRepoSpec(nil) _, pvcName := createBackupAndCheckPVC(testCtx.DefaultNamespace) Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{Name: pvcName, Namespace: testCtx.DefaultNamespace}, @@ -655,6 +658,143 @@ spec: })).Should(Succeed()) }) + Context("with AccessMethodTool", func() { + var repo *dpv1alpha1.BackupRepo + var backup *dpv1alpha1.Backup + var toolConfigSecretKey types.NamespacedName + + BeforeEach(func() { + By("preparing") + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.DatasafedConfigTemplate = ` +[storage] +type=local +key1={{ index .Parameters "key1" }} +key2={{ index .Parameters "key2" }} +cred-key1={{ index .Parameters "cred-key1" }} +cred-key2={{ index .Parameters "cred-key2" }} +` + }) + createBackupRepoSpec(func(repo *dpv1alpha1.BackupRepo) { + repo.Spec.AccessMethod = dpv1alpha1.AccessMethodTool + }) + + Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, obj *dpv1alpha1.BackupRepo) { + 
g.Expect(obj.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoReady)) + repo = obj + })).Should(Succeed()) + + backup = createBackupSpec(nil) + toolConfigSecretKey = types.NamespacedName{ + Name: repo.Status.ToolConfigSecretName, + Namespace: backup.Namespace, + } + Eventually(testapps.CheckObjExists(&testCtx, toolConfigSecretKey, &corev1.Secret{}, true)).Should(Succeed()) + }) + + It("should check that the storage provider has a non-empty datasafedConfigTemplate", func() { + By("preparing") + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.DatasafedConfigTemplate = "" + }) + createBackupRepoSpec(func(repo *dpv1alpha1.BackupRepo) { + repo.Spec.AccessMethod = dpv1alpha1.AccessMethodTool + }) + By("checking") + Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { + g.Expect(repo.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoFailed)) + cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeStorageProviderReady) + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonInvalidStorageProvider)) + g.Expect(cond.Message).Should(ContainSubstring("DatasafedConfigTemplate is empty")) + })).Should(Succeed()) + }) + + It("should fail if the datasafedConfigTemplate is invalid", func() { + By("preparing") + createStorageProviderSpec(func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.DatasafedConfigTemplate = "bad template {{" + }) + createBackupRepoSpec(func(repo *dpv1alpha1.BackupRepo) { + repo.Spec.AccessMethod = dpv1alpha1.AccessMethodTool + }) + By("checking") + Eventually(testapps.CheckObj(&testCtx, repoKey, func(g Gomega, repo *dpv1alpha1.BackupRepo) { + g.Expect(repo.Status.Phase).Should(Equal(dpv1alpha1.BackupRepoFailed)) + cond := meta.FindStatusCondition(repo.Status.Conditions, ConditionTypeToolConfigChecked) + g.Expect(cond).NotTo(BeNil()) + 
g.Expect(cond.Status).Should(BeEquivalentTo(corev1.ConditionFalse)) + g.Expect(cond.Reason).Should(BeEquivalentTo(ReasonBadToolConfigTemplate)) + })).Should(Succeed()) + }) + + It("should create the secret containing the tool config", func() { + Eventually(testapps.CheckObj(&testCtx, toolConfigSecretKey, func(g Gomega, secret *corev1.Secret) { + g.Expect(secret.Data).Should(HaveKeyWithValue("datasafed.conf", []byte(` +[storage] +type=local +key1=val1 +key2=val2 +cred-key1=cred-val1 +cred-key2=cred-val2 +`))) + })).Should(Succeed()) + + By("creating a backup in namespace2") + createBackupSpec(func(backup *dpv1alpha1.Backup) { + backup.Namespace = namespace2 + }) + secretKey := types.NamespacedName{ + Name: repo.Status.ToolConfigSecretName, + Namespace: namespace2, + } + Eventually(testapps.CheckObjExists(&testCtx, secretKey, &corev1.Secret{}, true)).Should(Succeed()) + }) + + It("should update the content of the secret when the template or the value changes", func() { + By("changing the template") + Eventually(testapps.GetAndChangeObj(&testCtx, providerKey, func(provider *storagev1alpha1.StorageProvider) { + provider.Spec.DatasafedConfigTemplate += "new-item=new-value\n" + })).Should(Succeed()) + Eventually(testapps.CheckObj(&testCtx, toolConfigSecretKey, func(g Gomega, secret *corev1.Secret) { + g.Expect(secret.Data).Should(HaveKeyWithValue("datasafed.conf", []byte(` +[storage] +type=local +key1=val1 +key2=val2 +cred-key1=cred-val1 +cred-key2=cred-val2 +new-item=new-value +`))) + })).Should(Succeed()) + + By("changing the value") + Eventually(testapps.GetAndChangeObj(&testCtx, repoKey, func(repo *dpv1alpha1.BackupRepo) { + repo.Spec.Config["key1"] = "changed-val1" + })).Should(Succeed()) + Eventually(testapps.CheckObj(&testCtx, toolConfigSecretKey, func(g Gomega, secret *corev1.Secret) { + g.Expect(secret.Data).Should(HaveKeyWithValue("datasafed.conf", []byte(` +[storage] +type=local +key1=changed-val1 +key2=val2 +cred-key1=cred-val1 +cred-key2=cred-val2 
+new-item=new-value +`))) + })).Should(Succeed()) + }) + + It("should delete the secret when the repo is deleted", func() { + By("deleting the Backup and BackupRepo") + testapps.DeleteObject(&testCtx, client.ObjectKeyFromObject(backup), &dpv1alpha1.Backup{}) + testapps.DeleteObject(&testCtx, repoKey, &dpv1alpha1.BackupRepo{}) + By("checking the secret is deleted") + Eventually(testapps.CheckObjExists(&testCtx, toolConfigSecretKey, &corev1.Secret{}, false)).Should(Succeed()) + }) + }) + It("should block the deletion of the BackupRepo if derived objects are not deleted", func() { backup, pvcName := createBackupAndCheckPVC(namespace2) diff --git a/controllers/dataprotection/type.go b/controllers/dataprotection/type.go index fea863f5e6d..6860acd232d 100644 --- a/controllers/dataprotection/type.go +++ b/controllers/dataprotection/type.go @@ -49,13 +49,15 @@ const ( dataProtectionBackupTargetPodKey = "dataprotection.kubeblocks.io/target-pod-name" dataProtectionAnnotationCreateByPolicyKey = "dataprotection.kubeblocks.io/created-by-policy" - dataProtectionBackupRepoKey = "dataprotection.kubeblocks.io/backup-repo-name" - dataProtectionNeedRepoPVCKey = "dataprotection.kubeblocks.io/need-repo-pvc" + dataProtectionBackupRepoKey = "dataprotection.kubeblocks.io/backup-repo-name" + dataProtectionWaitRepoPreparationKey = "dataprotection.kubeblocks.io/wait-repo-preparation" + dataProtectionIsToolConfigKey = "dataprotection.kubeblocks.io/is-tool-config" // annotation keys - dataProtectionSecretTemplateMD5AnnotationKey = "dataprotection.kubeblocks.io/secret-template-md5" - dataProtectionTemplateValuesMD5AnnotationKey = "dataprotection.kubeblocks.io/template-values-md5" - dataProtectionPVCTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/pvc-template-md5" + dataProtectionSecretTemplateMD5AnnotationKey = "dataprotection.kubeblocks.io/secret-template-md5" + dataProtectionTemplateValuesMD5AnnotationKey = "dataprotection.kubeblocks.io/template-values-md5" + 
dataProtectionPVCTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/pvc-template-md5" + dataProtectionToolConfigTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/tool-config-template-md5" // the key of persistentVolumeTemplate in the configmap. persistentVolumeTemplateKey = "persistentVolume" @@ -70,24 +72,27 @@ const ( ConditionTypeParametersChecked = "ParametersChecked" ConditionTypeStorageClassCreated = "StorageClassCreated" ConditionTypePVCTemplateChecked = "PVCTemplateChecked" + ConditionTypeToolConfigChecked = "ToolConfigSecretChecked" ConditionTypeDerivedObjectsDeleted = "DerivedObjectsDeleted" // condition reasons - ReasonStorageProviderReady = "StorageProviderReady" - ReasonStorageProviderNotReady = "StorageProviderNotReady" - ReasonStorageProviderNotFound = "StorageProviderNotFound" - ReasonInvalidStorageProvider = "InvalidStorageProvider" - ReasonParametersChecked = "ParametersChecked" - ReasonCredentialSecretNotFound = "CredentialSecretNotFound" - ReasonBadSecretTemplate = "BadSecretTemplate" - ReasonBadStorageClassTemplate = "BadStorageClassTemplate" - ReasonBadPVCTemplate = "BadPVCTemplate" - ReasonStorageClassCreated = "StorageClassCreated" - ReasonPVCTemplateChecked = "PVCTemplateChecked" - ReasonHaveAssociatedBackups = "HaveAssociatedBackups" - ReasonHaveResidualPVCs = "HaveResidualPVCs" - ReasonDerivedObjectsDeleted = "DerivedObjectsDeleted" - ReasonUnknownError = "UnknownError" + ReasonStorageProviderReady = "StorageProviderReady" + ReasonStorageProviderNotReady = "StorageProviderNotReady" + ReasonStorageProviderNotFound = "StorageProviderNotFound" + ReasonInvalidStorageProvider = "InvalidStorageProvider" + ReasonParametersChecked = "ParametersChecked" + ReasonCredentialSecretNotFound = "CredentialSecretNotFound" + ReasonPrepareCSISecretFailed = "PrepareCSISecretFailed" + ReasonPrepareStorageClassFailed = "PrepareStorageClassFailed" + ReasonBadPVCTemplate = "BadPVCTemplate" + ReasonBadToolConfigTemplate = 
"BadToolConfigTemplate" + ReasonStorageClassCreated = "StorageClassCreated" + ReasonPVCTemplateChecked = "PVCTemplateChecked" + ReasonToolConfigChecked = "ToolConfigChecked" + ReasonHaveAssociatedBackups = "HaveAssociatedBackups" + ReasonHaveResidualPVCs = "HaveResidualPVCs" + ReasonDerivedObjectsDeleted = "DerivedObjectsDeleted" + ReasonUnknownError = "UnknownError" ) const manifestsUpdaterContainerName = "manifests-updater" diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml index 4cb9aea608e..8d61dbb411b 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml @@ -51,6 +51,13 @@ spec: spec: description: BackupRepoSpec defines the desired state of BackupRepo properties: + accessMethod: + default: Mount + description: Specifies the access method of the backup repo. + enum: + - Mount + - Tool + type: string config: additionalProperties: type: string @@ -211,6 +218,10 @@ spec: description: Backup repo reconciliation phases. Valid values are PreChecking, Failed, Ready, Deleting. type: string + toolConfigSecretName: + description: toolConfigSecretName is the name of the secret containing + the configuration for the access tool. 
+ type: string type: object type: object served: true diff --git a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml index 4cdbf5e72d9..e53ece23fab 100644 --- a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml +++ b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml @@ -18,7 +18,17 @@ spec: singular: storageprovider scope: Cluster versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .spec.csiDriverName + name: CSIDRIVER + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 schema: openAPIV3Schema: description: StorageProvider is the Schema for the storageproviders API StorageProvider @@ -48,6 +58,11 @@ spec: by the CSI driver. The template will be rendered with the following variables: - Parameters: a map of parameters defined in the ParametersSchema.' type: string + datasafedConfigTemplate: + description: 'A Go template for rendering a config used by the datasafed + command. The template will be rendered with the following variables: + - Parameters: a map of parameters defined in the ParametersSchema.' + type: string parametersSchema: description: The schema describes the parameters required by this StorageProvider, when rendering the templates. 
diff --git a/deploy/helm/templates/storageprovider/cos.yaml b/deploy/helm/templates/storageprovider/cos.yaml index fc9a597a87a..ec39ede7d5a 100644 --- a/deploy/helm/templates/storageprovider/cos.yaml +++ b/deploy/helm/templates/storageprovider/cos.yaml @@ -33,6 +33,16 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + [storage] + type = s3 + provider = TencentCOS + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + endpoint = {{ `{{ printf "cos.%s.myqcloud.com" .Parameters.region }}` }} + root = {{ `{{ index .Parameters "bucket" }}` }} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/deploy/helm/templates/storageprovider/gcs.yaml b/deploy/helm/templates/storageprovider/gcs.yaml index 9e835fc4f24..0d8ae56f63b 100644 --- a/deploy/helm/templates/storageprovider/gcs.yaml +++ b/deploy/helm/templates/storageprovider/gcs.yaml @@ -8,13 +8,12 @@ metadata: spec: csiDriverName: ru.yandex.s3.csi csiDriverSecretTemplate: | - accessKeyID: {{ `{{ index .Parameters "accessKeyId" }}` }} - secretAccessKey: {{ `{{ index .Parameters "secretAccessKey" }}` }} - {{ `{{- $region := index .Parameters "region" }}` }} {{ `{{- $endpoint := index .Parameters "endpoint" }}` }} {{ `{{- if not $endpoint }}` }} {{ `{{- $endpoint = (printf "https://storage.googleapis.com") }}` }} {{ `{{- end }}` }} + accessKeyID: {{ `{{ index .Parameters "accessKeyId" }}` }} + secretAccessKey: {{ `{{ index .Parameters "secretAccessKey" }}` }} endpoint: {{ `{{ $endpoint }}` }} storageClassTemplate: | @@ -33,6 +32,20 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + 
[storage] + type = s3 + provider = GCS + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + {{ `{{- $endpoint := index .Parameters "endpoint" }}` }} + {{ `{{- if not $endpoint }}` }} + {{ `{{- $endpoint = (printf "https://storage.googleapis.com") }}` }} + {{ `{{- end }}` }} + endpoint = {{ `{{ $endpoint }}` }} + root = {{ `{{ index .Parameters "bucket" }}` }} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/deploy/helm/templates/storageprovider/minio.yaml b/deploy/helm/templates/storageprovider/minio.yaml index 27e111babca..62ff8b65489 100644 --- a/deploy/helm/templates/storageprovider/minio.yaml +++ b/deploy/helm/templates/storageprovider/minio.yaml @@ -27,6 +27,16 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + [storage] + type = s3 + provider = Minio + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + endpoint = {{ `{{ index .Parameters "endpoint" }}` }} + root = {{ `{{ index .Parameters "bucket" }}` }} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/deploy/helm/templates/storageprovider/obs.yaml b/deploy/helm/templates/storageprovider/obs.yaml index 9b5027a7a90..a61ea97b19a 100644 --- a/deploy/helm/templates/storageprovider/obs.yaml +++ b/deploy/helm/templates/storageprovider/obs.yaml @@ -33,6 +33,17 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + [storage] + type = s3 + provider = HuaweiOBS + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + 
secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + region = {{ `{{ index .Parameters "region" }}` }} + endpoint = {{ `{{ printf "obs.%s.myhuaweicloud.com" .Parameters.region }}` }} + root = {{ `{{ index .Parameters "bucket" }}` }} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/deploy/helm/templates/storageprovider/oss.yaml b/deploy/helm/templates/storageprovider/oss.yaml index 05f41d81b36..5ff3b780e95 100644 --- a/deploy/helm/templates/storageprovider/oss.yaml +++ b/deploy/helm/templates/storageprovider/oss.yaml @@ -32,6 +32,16 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + [storage] + type = s3 + provider = Alibaba + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + endpoint = {{ `{{- printf "oss-%s.aliyuncs.com" .Parameters.region) }}` }} + root = {{ `{{ index .Parameters "bucket" }}` }} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/deploy/helm/templates/storageprovider/s3.yaml b/deploy/helm/templates/storageprovider/s3.yaml index cdc85e12b69..8bc6730d1b6 100644 --- a/deploy/helm/templates/storageprovider/s3.yaml +++ b/deploy/helm/templates/storageprovider/s3.yaml @@ -36,6 +36,17 @@ spec: csi.storage.k8s.io/node-publish-secret-name: {{ `{{ .CSIDriverSecretRef.Name }}` }} csi.storage.k8s.io/node-publish-secret-namespace: {{ `{{ .CSIDriverSecretRef.Namespace }}` }} + datasafedConfigTemplate: | + [storage] + type = s3 + provider = AWS + env_auth = false + access_key_id = {{ `{{ index .Parameters "accessKeyId" }}` }} + secret_access_key = {{ `{{ index .Parameters "secretAccessKey" }}` }} + region = {{ `{{ index .Parameters "region" }}` }} + endpoint = {{ `{{ index .Parameters "endpoint" }}` }} + root = {{ `{{ index .Parameters "bucket" }}` 
}} + parametersSchema: openAPIV3Schema: type: "object" diff --git a/go.mod b/go.mod index 501db1d9405..b1d1d74378c 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,6 @@ require ( github.com/go-logr/zapr v1.2.4 github.com/go-redis/redismock/v9 v9.0.3 github.com/go-sql-driver/mysql v1.7.1 - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 @@ -210,6 +209,7 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-redis/redis/v7 v7.4.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-test/deep v1.1.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect From cf80026729ac4a1c2b24ac1a8ffb7b3f89b5e54c Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:05:35 +0800 Subject: [PATCH 40/58] chore: delete controller-gen cache (#5282) --- .github/localflows/cicd-local.yml | 3 --- .github/workflows/cicd-pull-request.yml | 3 --- .github/workflows/cicd-push.yml | 4 ---- 3 files changed, 10 deletions(-) diff --git a/.github/localflows/cicd-local.yml b/.github/localflows/cicd-local.yml index 9e419470684..beef2f90674 100644 --- a/.github/localflows/cicd-local.yml +++ b/.github/localflows/cicd-local.yml @@ -10,9 +10,6 @@ jobs: - uses: actions/checkout@v3 - name: make test run: | - mkdir -p ./bin - cp -r /go/bin/controller-gen ./bin/controller-gen - cp -r /go/bin/setup-envtest ./bin/setup-envtest make mod-vendor lint test diff --git a/.github/workflows/cicd-pull-request.yml b/.github/workflows/cicd-pull-request.yml index cf31693b5a6..be9746019b9 100644 --- a/.github/workflows/cicd-pull-request.yml +++ b/.github/workflows/cicd-pull-request.yml @@ -94,9 +94,6 @@ jobs: - uses: actions/checkout@v3 - name: make 
mod-vendor run: | - mkdir -p ./bin - cp -r /go/bin/controller-gen ./bin/controller-gen - cp -r /go/bin/setup-envtest ./bin/setup-envtest make mod-vendor - name: make lint diff --git a/.github/workflows/cicd-push.yml b/.github/workflows/cicd-push.yml index d90c6002772..68bce4811b5 100644 --- a/.github/workflows/cicd-push.yml +++ b/.github/workflows/cicd-push.yml @@ -133,10 +133,6 @@ jobs: - uses: actions/checkout@v3 - name: make manifests check run: | - mkdir -p ./bin - cp -r /go/bin/controller-gen ./bin/controller-gen - cp -r /go/bin/setup-envtest ./bin/setup-envtest - make manifests FILE_CHANGES=`git diff --name-only ${{ github.sha }}` if [[ ! -z "$FILE_CHANGES" ]]; then From 1004921ed64c27933cfcddc4ede418432be27a62 Mon Sep 17 00:00:00 2001 From: xuriwuyun Date: Wed, 27 Sep 2023 11:25:38 +0800 Subject: [PATCH 41/58] fix: lorryclient is nil check (#5284) --- controllers/apps/components/component.go | 2 +- internal/controllerutil/util.go | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/controllers/apps/components/component.go b/controllers/apps/components/component.go index 6b6982e773f..2c4c84eca14 100644 --- a/controllers/apps/components/component.go +++ b/controllers/apps/components/component.go @@ -1070,7 +1070,7 @@ func (c *rsmComponent) leaveMember4ScaleIn(reqCtx intctrlutil.RequestCtx, cli cl continue } - if lorryCli == nil { + if intctrlutil.IsNil(lorryCli) { // no lorry in the pod continue } diff --git a/internal/controllerutil/util.go b/internal/controllerutil/util.go index e3249ca7577..3d9804e60b8 100644 --- a/internal/controllerutil/util.go +++ b/internal/controllerutil/util.go @@ -21,6 +21,7 @@ package controllerutil import ( "context" + "reflect" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -94,3 +95,14 @@ func IsRSMEnabled() bool { } return true } + +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Ptr, reflect.Map, reflect.Array, 
reflect.Chan, reflect.Slice: + return reflect.ValueOf(i).IsNil() + } + return false +} From 19352b5eb265d7b63f438722b7d2c7b49a9b5ef6 Mon Sep 17 00:00:00 2001 From: dingben Date: Wed, 27 Sep 2023 13:17:34 +0800 Subject: [PATCH 42/58] fix: the error message retunred (#5227) --- internal/cli/cmd/bench/bench.go | 7 +++++++ internal/cli/cmd/bench/pgbench.go | 4 ---- internal/cli/cmd/bench/sysbench.go | 4 ---- internal/cli/cmd/bench/ycsb.go | 4 ---- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/internal/cli/cmd/bench/bench.go b/internal/cli/cmd/bench/bench.go index 51c46b6377a..73f0e5be5fe 100644 --- a/internal/cli/cmd/bench/bench.go +++ b/internal/cli/cmd/bench/bench.go @@ -129,6 +129,10 @@ func (o *BenchBaseOptions) BaseValidate() error { return fmt.Errorf("port is required") } + if err := validateBenchmarkExist(o.factory, o.IOStreams, o.name); err != nil { + return err + } + return nil } @@ -461,6 +465,9 @@ func validateBenchmarkExist(factory cmdutil.Factory, streams genericclioptions.I bench.Print = false result, err := bench.Run() if err != nil { + if strings.Contains(err.Error(), "the server doesn't have a resource type") { + return fmt.Errorf("kubebench is not installed, please run `kbcli addon enable kubebench` to install it") + } return err } diff --git a/internal/cli/cmd/bench/pgbench.go b/internal/cli/cmd/bench/pgbench.go index 259a0b8b71b..eacc348d864 100644 --- a/internal/cli/cmd/bench/pgbench.go +++ b/internal/cli/cmd/bench/pgbench.go @@ -190,10 +190,6 @@ func (o *PgBenchOptions) Validate() error { return fmt.Errorf("database is required") } - if err := validateBenchmarkExist(o.factory, o.IOStreams, o.name); err != nil { - return err - } - return nil } diff --git a/internal/cli/cmd/bench/sysbench.go b/internal/cli/cmd/bench/sysbench.go index 29887d562b8..7c310eee601 100644 --- a/internal/cli/cmd/bench/sysbench.go +++ b/internal/cli/cmd/bench/sysbench.go @@ -206,10 +206,6 @@ func (o *SysBenchOptions) Validate() error { return 
fmt.Errorf("database is required") } - if err := validateBenchmarkExist(o.factory, o.IOStreams, o.name); err != nil { - return err - } - if len(o.Type) == 0 { return fmt.Errorf("type is required") } diff --git a/internal/cli/cmd/bench/ycsb.go b/internal/cli/cmd/bench/ycsb.go index 35feb7d2a04..c182e84759a 100644 --- a/internal/cli/cmd/bench/ycsb.go +++ b/internal/cli/cmd/bench/ycsb.go @@ -193,10 +193,6 @@ func (o *YcsbOptions) Validate() error { return fmt.Errorf("driver %s is not supported", o.Driver) } - if err := validateBenchmarkExist(o.factory, o.IOStreams, o.name); err != nil { - return err - } - if o.RecordCount < 0 { return fmt.Errorf("record count should be positive") } From 6ca6b7edb651cd7a17a3df52e24b26c7820b8b34 Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Wed, 27 Sep 2023 13:36:14 +0800 Subject: [PATCH 43/58] chore: auto set issue and pr milestone (#5290) --- .github/workflows/e2e-performance.yml | 4 +-- .github/workflows/milestone-set.yaml | 27 +++++++++++++++++++ .../workflows/pull-request-label-size.yaml | 14 +++------- 3 files changed, 33 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/milestone-set.yaml diff --git a/.github/workflows/e2e-performance.yml b/.github/workflows/e2e-performance.yml index 87ce6c20da4..720e1f5ce5c 100644 --- a/.github/workflows/e2e-performance.yml +++ b/.github/workflows/e2e-performance.yml @@ -68,7 +68,7 @@ jobs: performance: name: ${{ inputs.NODE_TYPE }} ${{ inputs.PERFORMANCE_TYPE }} needs: check - uses: apecloud/apecloud-cd/.github/workflows/performance-test-k8s.yaml@v0.1.23 + uses: apecloud/apecloud-cd/.github/workflows/performance-test-k8s.yml@v0.1.30 with: CLOUD_PROVIDER: "eks" CLUSTER_VERSION: "${{ inputs.K8S_VERSION }}" @@ -80,5 +80,5 @@ jobs: BENCH_TABLES: "${{ inputs.BENCH_TABLES }}" CLUSTER_STORAGE: "${{ inputs.CLUSTER_STORAGE }}" REGION: "${{ vars.REGION_AWK_EKS }}" - APECD_REF: "v0.1.23" + APECD_REF: "v0.1.30" secrets: inherit diff --git 
a/.github/workflows/milestone-set.yaml b/.github/workflows/milestone-set.yaml new file mode 100644 index 00000000000..3928dbf9938 --- /dev/null +++ b/.github/workflows/milestone-set.yaml @@ -0,0 +1,27 @@ +name: Set Milestone + +on: + issues: + types: + - opened + - closed + pull_request_target: + types: + - opened + - closed + + +jobs: + issue-milestone: + if: ${{ github.event_name == 'issues' }} + uses: apecloud/apecloud-cd/.github/workflows/issue-milestone.yml@v0.1.30 + with: + APECD_REF: "v0.1.30" + secrets: inherit + + pr-milestone: + if: ${{ github.event_name == 'pull_request_target' }} + uses: apecloud/apecloud-cd/.github/workflows/pull-request-milestone.yml@v0.1.30 + with: + APECD_REF: "v0.1.30" + secrets: inherit diff --git a/.github/workflows/pull-request-label-size.yaml b/.github/workflows/pull-request-label-size.yaml index 243487f0f21..775727450ea 100644 --- a/.github/workflows/pull-request-label-size.yaml +++ b/.github/workflows/pull-request-label-size.yaml @@ -4,16 +4,10 @@ on: pull_request_target: types: [ edited, opened, synchronize ] -env: - GITHUB_TOKEN: ${{ github.token }} jobs: size-label: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: set size label - run: | - bash .github/utils/utils.sh --type 15 \ - --github-token "${{ env.GITHUB_TOKEN }}" \ - --pr-number ${{ github.event.pull_request.number }} + uses: apecloud/apecloud-cd/.github/workflows/pull-request-label-size.yml@v0.1.30 + with: + APECD_REF: "v0.1.30" + secrets: inherit From 796e3c7fb48db8598e1cbcfcc75fb2754f9e410a Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Wed, 27 Sep 2023 14:09:41 +0800 Subject: [PATCH 44/58] fix: SyncEnvConfigmap panic for configuration controller (#5243) (#5280) --- controllers/apps/configuration/configuration_controller.go | 1 + internal/controller/configuration/pipeline.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git 
a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 46c6fa5a951..5fde5c962bb 100644 --- a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -202,6 +202,7 @@ func isReconcileStatus(phase appsv1alpha1.ConfigurationPhase) bool { return phase == appsv1alpha1.CRunningPhase || phase == appsv1alpha1.CInitPhase || phase == appsv1alpha1.CPendingPhase || + phase == appsv1alpha1.CFailedPhase || phase == appsv1alpha1.CMergedPhase || phase == appsv1alpha1.CMergeFailedPhase || phase == appsv1alpha1.CUpgradingPhase || diff --git a/internal/controller/configuration/pipeline.go b/internal/controller/configuration/pipeline.go index 7df38f8b7fa..2a0e47e8035 100644 --- a/internal/controller/configuration/pipeline.go +++ b/internal/controller/configuration/pipeline.go @@ -379,7 +379,7 @@ func (p *updatePipeline) UpdateConfigVersion(revision string) *updatePipeline { func (p *updatePipeline) Sync() *updatePipeline { return p.Wrap(func() error { - if p.ConfigConstraintObj != nil { + if p.ConfigConstraintObj != nil && !p.isDone() { if err := SyncEnvConfigmap(*p.configSpec, p.newCM, &p.ConfigConstraintObj.Spec, p.Client, p.Context); err != nil { return err } From ce9ba66b3b28351343a9b7fdf88facff5c8adffc Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Wed, 27 Sep 2023 14:09:58 +0800 Subject: [PATCH 45/58] fix: component.BuildComponent painc when configuration.spec.ComponentName donot exist in the cluster.spec.componentSpecs (#5281) --- .../configuration/configuration_controller.go | 19 ++++++++++++++- .../configuration_controller_test.go | 23 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 5fde5c962bb..1d0881d8993 100644 --- 
a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -21,6 +21,7 @@ package configuration import ( "context" + "fmt" "strconv" "time" @@ -102,6 +103,10 @@ func (r *ConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Reques return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to get related object.") } + if fetcherTask.ClusterComObj == nil || fetcherTask.ClusterDefComObj == nil { + return r.failWithInvalidComponent(configuration, reqCtx) + } + if err := r.runTasks(reqCtx, configuration, fetcherTask, tasks); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to run configuration reconcile task.") } @@ -111,6 +116,17 @@ func (r *ConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Reques return intctrlutil.Reconciled() } +func (r *ConfigurationReconciler) failWithInvalidComponent(configuration *appsv1alpha1.Configuration, reqCtx intctrlutil.RequestCtx) (ctrl.Result, error) { + msg := fmt.Sprintf("not found cluster component or cluster definition component: [%s]", configuration.Spec.ComponentName) + reqCtx.Log.Error(fmt.Errorf(msg), "") + patch := client.MergeFrom(configuration.DeepCopy()) + configuration.Status.Message = msg + if err := r.Client.Status().Patch(reqCtx.Ctx, configuration, patch); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to update configuration status.") + } + return intctrlutil.Reconciled() +} + func isAllReady(configuration *appsv1alpha1.Configuration) bool { for _, item := range configuration.Spec.ConfigItemDetails { itemStatus := configuration.Status.GetItemStatus(item.Name) @@ -140,8 +156,8 @@ func (r *ConfigurationReconciler) runTasks( return err } - revision := strconv.FormatInt(configuration.GetGeneration(), 10) patch := client.MergeFrom(configuration.DeepCopy()) + revision := strconv.FormatInt(configuration.GetGeneration(), 10) for _, task := range tasks { if 
err := task.Do(fetcher, synthesizedComp, revision); err != nil { task.Status.Phase = appsv1alpha1.CMergeFailedPhase @@ -158,6 +174,7 @@ func (r *ConfigurationReconciler) runTasks( } } + configuration.Status.Message = "" if len(errs) > 0 { configuration.Status.Message = utilerrors.NewAggregate(errs).Error() } diff --git a/controllers/apps/configuration/configuration_controller_test.go b/controllers/apps/configuration/configuration_controller_test.go index 9dc2fd73e97..8e711818f1e 100644 --- a/controllers/apps/configuration/configuration_controller_test.go +++ b/controllers/apps/configuration/configuration_controller_test.go @@ -90,6 +90,29 @@ var _ = Describe("Configuration Controller", func() { g.Expect(itemStatus.Phase).Should(BeEquivalentTo(appsv1alpha1.CFinishedPhase)) }, time.Second*60, time.Second*1).Should(Succeed()) }) + + It("Invalid component test", func() { + _, _, clusterObj, clusterVersionObj, synthesizedComp := mockReconcileResource() + + cfgKey := client.ObjectKey{ + Name: core.GenerateComponentConfigurationName(clusterName, "invalid-component"), + Namespace: testCtx.DefaultNamespace, + } + + Expect(initConfiguration(&intctrlutil.ResourceCtx{ + Client: k8sClient, + Context: ctx, + Namespace: testCtx.DefaultNamespace, + ClusterName: clusterName, + ComponentName: "invalid-component", + }, synthesizedComp, clusterObj, clusterVersionObj)).Should(Succeed()) + + Eventually(func(g Gomega) { + cfg := &appsv1alpha1.Configuration{} + g.Expect(k8sClient.Get(ctx, cfgKey, cfg)).Should(Succeed()) + g.Expect(cfg.Status.Message).Should(ContainSubstring("not found cluster component")) + }, time.Second*60, time.Second*1).Should(Succeed()) + }) }) }) From 354916caffb6f6e2eb562a95dc87866a6bf01ef0 Mon Sep 17 00:00:00 2001 From: Wei Cao Date: Wed, 27 Sep 2023 15:14:59 +0800 Subject: [PATCH 46/58] chore: workaround of goland code inspector bug which fails to infer generic pointer type --- controllers/apps/components/utils.go | 2 +- .../configuration/config_related_helper.go 
| 2 +- .../cli/cmd/builder/template/k8s_resource.go | 12 +-- internal/generics/type.go | 81 +++++++++++-------- internal/testutil/apps/common_util.go | 6 +- 5 files changed, 59 insertions(+), 44 deletions(-) diff --git a/controllers/apps/components/utils.go b/controllers/apps/components/utils.go index 65ea05f4f8c..e9a025440a2 100644 --- a/controllers/apps/components/utils.go +++ b/controllers/apps/components/utils.go @@ -54,7 +54,7 @@ var ( ) func listObjWithLabelsInNamespace[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]]( - ctx context.Context, cli client.Client, _ func(T, L), namespace string, labels client.MatchingLabels) ([]PT, error) { + ctx context.Context, cli client.Client, _ func(T, PT, L, PL), namespace string, labels client.MatchingLabels) ([]PT, error) { var objList L if err := cli.List(ctx, PL(&objList), labels, client.InNamespace(namespace)); err != nil { return nil, err diff --git a/controllers/apps/configuration/config_related_helper.go b/controllers/apps/configuration/config_related_helper.go index 932f1a11317..1686289c012 100644 --- a/controllers/apps/configuration/config_related_helper.go +++ b/controllers/apps/configuration/config_related_helper.go @@ -35,7 +35,7 @@ import ( "github.com/apecloud/kubeblocks/internal/generics" ) -func retrieveRelatedComponentsByConfigmap[T generics.Object, L generics.ObjList[T], PL generics.PObjList[T, L]](cli client.Client, ctx context.Context, configSpecName string, _ func(T, L), cfg client.ObjectKey, opts ...client.ListOption) ([]T, []string, error) { +func retrieveRelatedComponentsByConfigmap[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]](cli client.Client, ctx context.Context, configSpecName string, _ func(T, PT, L, PL), cfg client.ObjectKey, opts ...client.ListOption) ([]T, []string, error) { var objList L if err := cli.List(ctx, PL(&objList), opts...); err != nil { return nil, nil, err diff --git 
a/internal/cli/cmd/builder/template/k8s_resource.go b/internal/cli/cmd/builder/template/k8s_resource.go index 94d26e7007f..b0898bd29a9 100644 --- a/internal/cli/cmd/builder/template/k8s_resource.go +++ b/internal/cli/cmd/builder/template/k8s_resource.go @@ -32,8 +32,8 @@ import ( type MatchResourceFunc func(object client.Object) bool -func CustomizedObjFromYaml[T generics.Object, PT generics.PObject[T], L generics.ObjList[T]](filePath string, signature func(T, L)) (PT, error) { - objList, err := CustomizedObjectListFromYaml[T, PT, L](filePath, signature) +func CustomizedObjFromYaml[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]](filePath string, signature func(T, PT, L, PL)) (PT, error) { + objList, err := CustomizedObjectListFromYaml[T, PT, L, PL](filePath, signature) if err != nil { return nil, err } @@ -43,7 +43,7 @@ func CustomizedObjFromYaml[T generics.Object, PT generics.PObject[T], L generics return objList[0], nil } -func CustomizedObjectListFromYaml[T generics.Object, PT generics.PObject[T], L generics.ObjList[T]](yamlfile string, signature func(T, L)) ([]PT, error) { +func CustomizedObjectListFromYaml[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]](yamlfile string, signature func(T, PT, L, PL)) ([]PT, error) { objBytes, err := os.ReadFile(yamlfile) if err != nil { return nil, err @@ -58,12 +58,12 @@ func CustomizedObjectListFromYaml[T generics.Object, PT generics.PObject[T], L g if err != nil { return nil, err } - objList = append(objList, CreateTypedObjectFromYamlByte[T, PT, L](doc, signature)) + objList = append(objList, CreateTypedObjectFromYamlByte[T, PT, L, PL](doc, signature)) } return objList, nil } -func CreateTypedObjectFromYamlByte[T generics.Object, PT generics.PObject[T], L generics.ObjList[T]](yamlBytes []byte, _ func(T, L)) PT { +func CreateTypedObjectFromYamlByte[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL 
generics.PObjList[T, L]](yamlBytes []byte, _ func(T, PT, L, PL)) PT { var obj PT if err := yaml.Unmarshal(yamlBytes, &obj); err != nil { return nil @@ -71,7 +71,7 @@ func CreateTypedObjectFromYamlByte[T generics.Object, PT generics.PObject[T], L return obj } -func GetTypedResourceObjectBySignature[T generics.Object, PT generics.PObject[T], L generics.ObjList[T]](objects []client.Object, _ func(T, L), matchers ...MatchResourceFunc) PT { +func GetTypedResourceObjectBySignature[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]](objects []client.Object, _ func(T, PT, L, PL), matchers ...MatchResourceFunc) PT { for _, object := range objects { obj, ok := object.(PT) if !ok { diff --git a/internal/generics/type.go b/internal/generics/type.go index b10ce1a2fd2..94f8536cbba 100644 --- a/internal/generics/type.go +++ b/internal/generics/type.go @@ -58,59 +58,74 @@ type PObjList[T Object, L ObjList[T]] interface { } // signature is used as an argument passed to generic functions for type deduction. +// Goland IDE 2023.2.1 and 2023.2.2 code inspector has a bug to infer pointer type like PObject and PObjList from Object and ObjectList. +// To workaround this bug, we also pass the pointer type to generic functions in signature. 
-var SecretSignature = func(_ corev1.Secret, _ corev1.SecretList) {} -var ServiceSignature = func(_ corev1.Service, _ corev1.ServiceList) {} -var PersistentVolumeClaimSignature = func(_ corev1.PersistentVolumeClaim, _ corev1.PersistentVolumeClaimList) {} -var PodSignature = func(_ corev1.Pod, _ corev1.PodList) {} -var EventSignature = func(_ corev1.Event, _ corev1.EventList) {} -var ConfigMapSignature = func(_ corev1.ConfigMap, _ corev1.ConfigMapList) {} -var EndpointsSignature = func(_ corev1.Endpoints, _ corev1.EndpointsList) {} +var SecretSignature = func(_ corev1.Secret, _ *corev1.Secret, _ corev1.SecretList, _ *corev1.SecretList) {} +var ServiceSignature = func(_ corev1.Service, _ *corev1.Service, _ corev1.ServiceList, _ *corev1.ServiceList) {} +var PersistentVolumeClaimSignature = func(_ corev1.PersistentVolumeClaim, _ *corev1.PersistentVolumeClaim, _ corev1.PersistentVolumeClaimList, _ *corev1.PersistentVolumeClaimList) { +} +var PodSignature = func(_ corev1.Pod, _ *corev1.Pod, _ corev1.PodList, _ *corev1.PodList) {} +var EventSignature = func(_ corev1.Event, _ *corev1.Event, _ corev1.EventList, _ *corev1.EventList) {} +var ConfigMapSignature = func(_ corev1.ConfigMap, _ *corev1.ConfigMap, _ corev1.ConfigMapList, _ *corev1.ConfigMapList) {} +var EndpointsSignature = func(_ corev1.Endpoints, _ *corev1.Endpoints, _ corev1.EndpointsList, _ *corev1.EndpointsList) {} -var RSMSignature = func(_ workloads.ReplicatedStateMachine, _ workloads.ReplicatedStateMachineList) {} -var StatefulSetSignature = func(A appsv1.StatefulSet, B appsv1.StatefulSetList) {} -var DeploymentSignature = func(_ appsv1.Deployment, _ appsv1.DeploymentList) {} -var ReplicaSetSignature = func(_ appsv1.ReplicaSet, _ appsv1.ReplicaSetList) {} +var RSMSignature = func(_ workloads.ReplicatedStateMachine, _ *workloads.ReplicatedStateMachine, _ workloads.ReplicatedStateMachineList, _ *workloads.ReplicatedStateMachineList) { +} +var StatefulSetSignature = func(_ appsv1.StatefulSet, _ 
*appsv1.StatefulSet, _ appsv1.StatefulSetList, _ *appsv1.StatefulSetList) { +} +var DeploymentSignature = func(_ appsv1.Deployment, _ *appsv1.Deployment, _ appsv1.DeploymentList, _ *appsv1.DeploymentList) {} +var ReplicaSetSignature = func(_ appsv1.ReplicaSet, _ *appsv1.ReplicaSet, _ appsv1.ReplicaSetList, _ *appsv1.ReplicaSetList) {} -var JobSignature = func(_ batchv1.Job, _ batchv1.JobList) {} -var CronJobSignature = func(_ batchv1.CronJob, _ batchv1.CronJobList) {} +var JobSignature = func(_ batchv1.Job, _ *batchv1.Job, _ batchv1.JobList, _ *batchv1.JobList) {} +var CronJobSignature = func(_ batchv1.CronJob, _ *batchv1.CronJob, _ batchv1.CronJobList, _ *batchv1.CronJobList) {} -var PodDisruptionBudgetSignature = func(_ policyv1.PodDisruptionBudget, _ policyv1.PodDisruptionBudgetList) { +var PodDisruptionBudgetSignature = func(_ policyv1.PodDisruptionBudget, _ *policyv1.PodDisruptionBudget, _ policyv1.PodDisruptionBudgetList, _ *policyv1.PodDisruptionBudgetList) { } -var StorageClassSignature = func(_ storagev1.StorageClass, _ storagev1.StorageClassList) {} -var CSIDriverSignature = func(_ storagev1.CSIDriver, _ storagev1.CSIDriverList) {} +var StorageClassSignature = func(_ storagev1.StorageClass, _ *storagev1.StorageClass, _ storagev1.StorageClassList, _ *storagev1.StorageClassList) { +} +var CSIDriverSignature = func(_ storagev1.CSIDriver, _ *storagev1.CSIDriver, _ storagev1.CSIDriverList, _ *storagev1.CSIDriverList) { +} -var VolumeSnapshotSignature = func(_ snapshotv1.VolumeSnapshot, _ snapshotv1.VolumeSnapshotList) {} +var VolumeSnapshotSignature = func(_ snapshotv1.VolumeSnapshot, _ *snapshotv1.VolumeSnapshot, _ snapshotv1.VolumeSnapshotList, _ *snapshotv1.VolumeSnapshotList) { +} -var ClusterSignature = func(_ appsv1alpha1.Cluster, _ appsv1alpha1.ClusterList) {} -var ClusterVersionSignature = func(_ appsv1alpha1.ClusterVersion, _ appsv1alpha1.ClusterVersionList) {} -var ClusterDefinitionSignature = func(_ appsv1alpha1.ClusterDefinition, _ 
appsv1alpha1.ClusterDefinitionList) { +var ClusterSignature = func(_ appsv1alpha1.Cluster, _ *appsv1alpha1.Cluster, _ appsv1alpha1.ClusterList, _ *appsv1alpha1.ClusterList) { +} +var ClusterVersionSignature = func(_ appsv1alpha1.ClusterVersion, _ *appsv1alpha1.ClusterVersion, _ appsv1alpha1.ClusterVersionList, _ *appsv1alpha1.ClusterVersionList) { } -var OpsRequestSignature = func(_ appsv1alpha1.OpsRequest, _ appsv1alpha1.OpsRequestList) {} -var ConfigConstraintSignature = func(_ appsv1alpha1.ConfigConstraint, _ appsv1alpha1.ConfigConstraintList) { +var ClusterDefinitionSignature = func(_ appsv1alpha1.ClusterDefinition, _ *appsv1alpha1.ClusterDefinition, _ appsv1alpha1.ClusterDefinitionList, _ *appsv1alpha1.ClusterDefinitionList) { +} +var OpsRequestSignature = func(_ appsv1alpha1.OpsRequest, _ *appsv1alpha1.OpsRequest, _ appsv1alpha1.OpsRequestList, _ *appsv1alpha1.OpsRequestList) { +} +var ConfigConstraintSignature = func(_ appsv1alpha1.ConfigConstraint, _ *appsv1alpha1.ConfigConstraint, _ appsv1alpha1.ConfigConstraintList, _ *appsv1alpha1.ConfigConstraintList) { } -var BackupPolicyTemplateSignature = func(_ appsv1alpha1.BackupPolicyTemplate, _ appsv1alpha1.BackupPolicyTemplateList) { +var BackupPolicyTemplateSignature = func(_ appsv1alpha1.BackupPolicyTemplate, _ *appsv1alpha1.BackupPolicyTemplate, _ appsv1alpha1.BackupPolicyTemplateList, _ *appsv1alpha1.BackupPolicyTemplateList) { +} +var BackupPolicySignature = func(_ dataprotectionv1alpha1.BackupPolicy, _ *dataprotectionv1alpha1.BackupPolicy, _ dataprotectionv1alpha1.BackupPolicyList, _ *dataprotectionv1alpha1.BackupPolicyList) { } -var BackupPolicySignature = func(_ dataprotectionv1alpha1.BackupPolicy, _ dataprotectionv1alpha1.BackupPolicyList) { +var BackupSignature = func(_ dataprotectionv1alpha1.Backup, _ *dataprotectionv1alpha1.Backup, _ dataprotectionv1alpha1.BackupList, _ *dataprotectionv1alpha1.BackupList) { } -var BackupSignature = func(_ dataprotectionv1alpha1.Backup, _ 
dataprotectionv1alpha1.BackupList) { +var BackupToolSignature = func(_ dataprotectionv1alpha1.BackupTool, _ *dataprotectionv1alpha1.BackupTool, _ dataprotectionv1alpha1.BackupToolList, _ *dataprotectionv1alpha1.BackupToolList) { } -var BackupToolSignature = func(_ dataprotectionv1alpha1.BackupTool, _ dataprotectionv1alpha1.BackupToolList) { +var RestoreJobSignature = func(_ dataprotectionv1alpha1.RestoreJob, _ *dataprotectionv1alpha1.RestoreJob, _ dataprotectionv1alpha1.RestoreJobList, _ *dataprotectionv1alpha1.RestoreJobList) { } -var RestoreJobSignature = func(_ dataprotectionv1alpha1.RestoreJob, _ dataprotectionv1alpha1.RestoreJobList) { +var BackupRepoSignature = func(_ dataprotectionv1alpha1.BackupRepo, _ *dataprotectionv1alpha1.BackupRepo, _ dataprotectionv1alpha1.BackupRepoList, _ *dataprotectionv1alpha1.BackupRepoList) { } -var BackupRepoSignature = func(_ dataprotectionv1alpha1.BackupRepo, _ dataprotectionv1alpha1.BackupRepoList) { +var AddonSignature = func(_ extensionsv1alpha1.Addon, _ *extensionsv1alpha1.Addon, _ extensionsv1alpha1.AddonList, _ *extensionsv1alpha1.AddonList) { } -var AddonSignature = func(_ extensionsv1alpha1.Addon, _ extensionsv1alpha1.AddonList) { +var ComponentResourceConstraintSignature = func(_ appsv1alpha1.ComponentResourceConstraint, _ *appsv1alpha1.ComponentResourceConstraint, _ appsv1alpha1.ComponentResourceConstraintList, _ *appsv1alpha1.ComponentResourceConstraintList) { +} +var ComponentClassDefinitionSignature = func(_ appsv1alpha1.ComponentClassDefinition, _ *appsv1alpha1.ComponentClassDefinition, _ appsv1alpha1.ComponentClassDefinitionList, _ *appsv1alpha1.ComponentClassDefinitionList) { } -var ComponentResourceConstraintSignature = func(_ appsv1alpha1.ComponentResourceConstraint, _ appsv1alpha1.ComponentResourceConstraintList) {} -var ComponentClassDefinitionSignature = func(_ appsv1alpha1.ComponentClassDefinition, _ appsv1alpha1.ComponentClassDefinitionList) {} -var StorageProviderSignature = func(_ 
storagev1alpha1.StorageProvider, _ storagev1alpha1.StorageProviderList) {} +var StorageProviderSignature = func(_ storagev1alpha1.StorageProvider, _ *storagev1alpha1.StorageProvider, _ storagev1alpha1.StorageProviderList, _ *storagev1alpha1.StorageProviderList) { +} -var ConfigurationSignature = func(_ appsv1alpha1.Configuration, _ appsv1alpha1.ConfigurationList) {} +var ConfigurationSignature = func(_ appsv1alpha1.Configuration, _ *appsv1alpha1.Configuration, _ appsv1alpha1.ConfigurationList, _ *appsv1alpha1.ConfigurationList) { +} func ToGVK(object client.Object) schema.GroupVersionKind { t := reflect.TypeOf(object) diff --git a/internal/testutil/apps/common_util.go b/internal/testutil/apps/common_util.go index 14dbacc4b04..184a164bad7 100644 --- a/internal/testutil/apps/common_util.go +++ b/internal/testutil/apps/common_util.go @@ -154,7 +154,7 @@ func CheckObj[T intctrlutil.Object, PT intctrlutil.PObject[T]](testCtx *testutil func List[T intctrlutil.Object, PT intctrlutil.PObject[T], L intctrlutil.ObjList[T], PL intctrlutil.PObjList[T, L]]( - testCtx *testutil.TestContext, _ func(T, L), opt ...client.ListOption) func(gomega.Gomega) []T { + testCtx *testutil.TestContext, _ func(T, PT, L, PL), opt ...client.ListOption) func(gomega.Gomega) []T { return func(g gomega.Gomega) []T { var objList L g.Expect(testCtx.Cli.List(testCtx.Ctx, PL(&objList), opt...)).To(gomega.Succeed()) @@ -290,7 +290,7 @@ func DeleteObject[T intctrlutil.Object, PT intctrlutil.PObject[T]]( // ClearResources clears all resources of the given type T satisfying the input ListOptions. func ClearResources[T intctrlutil.Object, PT intctrlutil.PObject[T], L intctrlutil.ObjList[T], PL intctrlutil.PObjList[T, L]]( - testCtx *testutil.TestContext, funcSig func(T, L), opts ...client.DeleteAllOfOption) { + testCtx *testutil.TestContext, funcSig func(T, PT, L, PL), opts ...client.DeleteAllOfOption) { ClearResourcesWithRemoveFinalizerOption[T, PT, L, PL](testCtx, funcSig, false, opts...) 
} @@ -298,7 +298,7 @@ func ClearResources[T intctrlutil.Object, PT intctrlutil.PObject[T], // removeFinalizer specifier, and satisfying the input ListOptions. func ClearResourcesWithRemoveFinalizerOption[T intctrlutil.Object, PT intctrlutil.PObject[T], L intctrlutil.ObjList[T], PL intctrlutil.PObjList[T, L]]( - testCtx *testutil.TestContext, _ func(T, L), removeFinalizer bool, opts ...client.DeleteAllOfOption) { + testCtx *testutil.TestContext, _ func(T, PT, L, PL), removeFinalizer bool, opts ...client.DeleteAllOfOption) { var ( obj T objList L From 0894e423c3406af5887e5dec7156ce39069ccc69 Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Wed, 27 Sep 2023 17:24:15 +0800 Subject: [PATCH 47/58] chore: auto move pr/issue to next milestone (#5295) --- .github/workflows/milestone-set.yaml | 16 ++++++++++++---- .github/workflows/milestoneclose.yml | 8 +++++++- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.github/workflows/milestone-set.yaml b/.github/workflows/milestone-set.yaml index 3928dbf9938..12dde122577 100644 --- a/.github/workflows/milestone-set.yaml +++ b/.github/workflows/milestone-set.yaml @@ -1,6 +1,7 @@ name: Set Milestone on: + workflow_dispatch: issues: types: - opened @@ -14,14 +15,21 @@ on: jobs: issue-milestone: if: ${{ github.event_name == 'issues' }} - uses: apecloud/apecloud-cd/.github/workflows/issue-milestone.yml@v0.1.30 + uses: apecloud/apecloud-cd/.github/workflows/issue-milestone.yml@v0.1.31 with: - APECD_REF: "v0.1.30" + APECD_REF: "v0.1.31" secrets: inherit pr-milestone: if: ${{ github.event_name == 'pull_request_target' }} - uses: apecloud/apecloud-cd/.github/workflows/pull-request-milestone.yml@v0.1.30 + uses: apecloud/apecloud-cd/.github/workflows/pull-request-milestone.yml@v0.1.31 with: - APECD_REF: "v0.1.30" + APECD_REF: "v0.1.31" + secrets: inherit + + move_milestone: + if: ${{ github.event_name == 'workflow_dispatch' }} + uses: 
apecloud/apecloud-cd/.github/workflows/milestone-move.yml@v0.1.31 + with: + APECD_REF: "v0.1.31" secrets: inherit diff --git a/.github/workflows/milestoneclose.yml b/.github/workflows/milestoneclose.yml index b0d549aaa9f..b4d1d5750f4 100644 --- a/.github/workflows/milestoneclose.yml +++ b/.github/workflows/milestoneclose.yml @@ -12,7 +12,13 @@ env: PROJECT_NUMBER: 2 jobs: - move_issues: + move_milestone: + uses: apecloud/apecloud-cd/.github/workflows/milestone-move.yml@v0.1.31 + with: + APECD_REF: "v0.1.31" + secrets: inherit + + move_issues: runs-on: ubuntu-latest steps: - name: Checkout repo From 47266b9e794a6ea6a55c13cb0c474a34ebbd4bf2 Mon Sep 17 00:00:00 2001 From: Leon Date: Wed, 27 Sep 2023 20:13:17 +0800 Subject: [PATCH 48/58] chore: upgrade controller runtime to v0.15.2 (#5270) --- Makefile | 4 +- apis/apps/v1alpha1/cluster_webhook.go | 19 +- .../v1alpha1/clusterdefinition_webhook.go | 13 +- apis/apps/v1alpha1/clusterversion_webhook.go | 15 +- apis/apps/v1alpha1/opsrequest_webhook.go | 19 +- .../v1alpha1/servicedescriptor_webhook.go | 13 +- .../replicatedstatemachine_webhook.go | 13 +- cmd/manager/main.go | 6 - ...s.kubeblocks.io_backuppolicytemplates.yaml | 3 +- ...apps.kubeblocks.io_clusterdefinitions.yaml | 437 ++++++--- .../bases/apps.kubeblocks.io_clusters.yaml | 11 +- .../apps.kubeblocks.io_clusterversions.yaml | 237 +++-- ...beblocks.io_componentclassdefinitions.yaml | 3 +- ...locks.io_componentresourceconstraints.yaml | 3 +- .../apps.kubeblocks.io_configconstraints.yaml | 6 +- .../apps.kubeblocks.io_configurations.yaml | 3 +- .../bases/apps.kubeblocks.io_opsrequests.yaml | 13 +- ...apps.kubeblocks.io_servicedescriptors.yaml | 19 +- ...otection.kubeblocks.io_backuppolicies.yaml | 6 +- ...aprotection.kubeblocks.io_backuprepos.yaml | 5 +- .../dataprotection.kubeblocks.io_backups.yaml | 3 +- ...aprotection.kubeblocks.io_backuptools.yaml | 12 +- ...aprotection.kubeblocks.io_restorejobs.yaml | 27 +- .../extensions.kubeblocks.io_addons.yaml | 3 +- 
...torage.kubeblocks.io_storageproviders.yaml | 3 +- ...kubeblocks.io_replicatedstatemachines.yaml | 551 ++++++++--- config/rbac/role.yaml | 1 - config/webhook/manifests.yaml | 2 - controllers/apps/cluster_controller.go | 11 +- controllers/apps/components/types.go | 5 +- controllers/apps/opsrequest_controller.go | 9 +- controllers/apps/systemaccount_controller.go | 5 +- .../dataprotection/backup_controller.go | 5 +- .../dataprotection/backuppolicy_controller.go | 6 +- .../dataprotection/backuprepo_controller.go | 16 +- controllers/extensions/addon_controller.go | 5 +- .../storage/storageprovider_controller.go | 5 +- .../replicatedstatemachine_controller.go | 9 +- deploy/helm/config/rbac/role.yaml | 1 - ...s.kubeblocks.io_backuppolicytemplates.yaml | 3 +- ...apps.kubeblocks.io_clusterdefinitions.yaml | 437 ++++++--- .../crds/apps.kubeblocks.io_clusters.yaml | 11 +- .../apps.kubeblocks.io_clusterversions.yaml | 237 +++-- ...beblocks.io_componentclassdefinitions.yaml | 3 +- ...locks.io_componentresourceconstraints.yaml | 3 +- .../apps.kubeblocks.io_configconstraints.yaml | 6 +- .../apps.kubeblocks.io_configurations.yaml | 3 +- .../crds/apps.kubeblocks.io_opsrequests.yaml | 13 +- ...apps.kubeblocks.io_servicedescriptors.yaml | 19 +- ...otection.kubeblocks.io_backuppolicies.yaml | 6 +- ...aprotection.kubeblocks.io_backuprepos.yaml | 5 +- .../dataprotection.kubeblocks.io_backups.yaml | 3 +- ...aprotection.kubeblocks.io_backuptools.yaml | 12 +- ...aprotection.kubeblocks.io_restorejobs.yaml | 27 +- .../crds/extensions.kubeblocks.io_addons.yaml | 3 +- ...torage.kubeblocks.io_storageproviders.yaml | 3 +- ...kubeblocks.io_replicatedstatemachines.yaml | 551 ++++++++--- go.mod | 226 ++--- go.sum | 906 ++++++------------ hack/client-sdk-gen.sh | 5 +- internal/cli/cloudprovider/k3d.go | 2 +- internal/cli/cmd/backuprepo/create.go | 2 +- .../cli/cmd/builder/template/mock_client.go | 9 + internal/cli/cmd/class/create.go | 2 +- internal/cli/cmd/cli.go | 4 +- 
internal/cli/cmd/cluster/create.go | 4 +- internal/cli/cmd/cluster/create_util.go | 2 +- internal/cli/cmd/cluster/label.go | 6 - internal/cli/edit/custom_edit.go | 8 +- internal/cli/patch/patch.go | 11 - internal/cli/util/completion.go | 6 +- internal/cli/util/flags/flags.go | 2 +- .../controller/handler/handler_builder.go | 4 +- .../handler/handler_builder_test.go | 8 +- internal/webhook/pod_admission.go | 82 -- internal/webhook/webhook.go | 43 - lorry/binding/etcd/etcd_test.go | 4 +- pkg/client/clientset/versioned/doc.go | 20 - .../fake/fake_backuppolicytemplate.go | 5 +- .../typed/apps/v1alpha1/fake/fake_cluster.go | 5 +- .../v1alpha1/fake/fake_clusterdefinition.go | 5 +- .../apps/v1alpha1/fake/fake_clusterversion.go | 5 +- .../fake/fake_componentclassdefinition.go | 5 +- .../fake/fake_componentresourceconstraint.go | 5 +- .../v1alpha1/fake/fake_configconstraint.go | 5 +- .../apps/v1alpha1/fake/fake_opsrequest.go | 5 +- .../v1alpha1/fake/fake_servicedescriptor.go | 5 +- .../v1alpha1/fake/fake_backup.go | 5 +- .../v1alpha1/fake/fake_backuppolicy.go | 5 +- .../v1alpha1/fake/fake_backuprepo.go | 5 +- .../v1alpha1/fake/fake_backuptool.go | 5 +- .../v1alpha1/fake/fake_restorejob.go | 5 +- .../extensions/v1alpha1/fake/fake_addon.go | 5 +- .../v1alpha1/fake/fake_storageprovider.go | 5 +- .../fake/fake_replicatedstatemachine.go | 5 +- .../informers/externalversions/factory.go | 4 +- 96 files changed, 2545 insertions(+), 1772 deletions(-) delete mode 100644 internal/webhook/pod_admission.go delete mode 100644 internal/webhook/webhook.go delete mode 100644 pkg/client/clientset/versioned/doc.go diff --git a/Makefile b/Makefile index 51a549ffed7..a85828a59a4 100644 --- a/Makefile +++ b/Makefile @@ -477,8 +477,8 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest ## Tool Versions -KUSTOMIZE_VERSION ?= v4.5.7 -CONTROLLER_TOOLS_VERSION ?= v0.9.0 +KUSTOMIZE_VERSION ?= v5.1.1 +CONTROLLER_TOOLS_VERSION ?= v0.12.1 CUE_VERSION ?= v0.4.3 
KUSTOMIZE_INSTALL_SCRIPT ?= "$(GITHUB_PROXY)https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" diff --git a/apis/apps/v1alpha1/cluster_webhook.go b/apis/apps/v1alpha1/cluster_webhook.go index df18f4e1318..471afad604c 100644 --- a/apis/apps/v1alpha1/cluster_webhook.go +++ b/apis/apps/v1alpha1/cluster_webhook.go @@ -30,6 +30,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. @@ -47,31 +48,31 @@ func (r *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error { var _ webhook.Validator = &Cluster{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateCreate() error { +func (r *Cluster) ValidateCreate() (admission.Warnings, error) { clusterlog.Info("validate create", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateUpdate(old runtime.Object) error { +func (r *Cluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { clusterlog.Info("validate update", "name", r.Name) lastCluster := old.(*Cluster) if lastCluster.Spec.ClusterDefRef != r.Spec.ClusterDefRef { - return newInvalidError(ClusterKind, r.Name, "spec.clusterDefinitionRef", "clusterDefinitionRef is immutable, you can not update it. ") + return nil, newInvalidError(ClusterKind, r.Name, "spec.clusterDefinitionRef", "clusterDefinitionRef is immutable, you can not update it. 
") } if err := r.validate(); err != nil { - return err + return nil, err } - return r.validateVolumeClaimTemplates(lastCluster) + return nil, r.validateVolumeClaimTemplates(lastCluster) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateDelete() error { +func (r *Cluster) ValidateDelete() (admission.Warnings, error) { clusterlog.Info("validate delete", "name", r.Name) if r.Spec.TerminationPolicy == DoNotTerminate { - return fmt.Errorf("the deletion for a cluster with DoNotTerminate termination policy is denied") + return nil, fmt.Errorf("the deletion for a cluster with DoNotTerminate termination policy is denied") } - return nil + return nil, nil } // validateVolumeClaimTemplates volumeClaimTemplates is forbidden modification except for storage size. diff --git a/apis/apps/v1alpha1/clusterdefinition_webhook.go b/apis/apps/v1alpha1/clusterdefinition_webhook.go index 6e3ef387fe8..f5290a581e4 100644 --- a/apis/apps/v1alpha1/clusterdefinition_webhook.go +++ b/apis/apps/v1alpha1/clusterdefinition_webhook.go @@ -28,6 +28,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. 
@@ -82,21 +83,21 @@ func (r *ClusterDefinition) Default() { var _ webhook.Validator = &ClusterDefinition{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterDefinition) ValidateCreate() error { +func (r *ClusterDefinition) ValidateCreate() (admission.Warnings, error) { clusterdefinitionlog.Info("validate create", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterDefinition) ValidateUpdate(old runtime.Object) error { +func (r *ClusterDefinition) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { clusterdefinitionlog.Info("validate update", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterDefinition) ValidateDelete() error { +func (r *ClusterDefinition) ValidateDelete() (admission.Warnings, error) { clusterdefinitionlog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } // Validate ClusterDefinition.spec is legal diff --git a/apis/apps/v1alpha1/clusterversion_webhook.go b/apis/apps/v1alpha1/clusterversion_webhook.go index 16c37b92b30..ba2727fba4f 100644 --- a/apis/apps/v1alpha1/clusterversion_webhook.go +++ b/apis/apps/v1alpha1/clusterversion_webhook.go @@ -29,6 +29,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. 
@@ -46,26 +47,26 @@ func (r *ClusterVersion) SetupWebhookWithManager(mgr ctrl.Manager) error { var _ webhook.Validator = &ClusterVersion{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterVersion) ValidateCreate() error { +func (r *ClusterVersion) ValidateCreate() (admission.Warnings, error) { clusterversionlog.Info("validate create", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterVersion) ValidateUpdate(old runtime.Object) error { +func (r *ClusterVersion) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { clusterversionlog.Info("validate update", "name", r.Name) // determine whether r.spec content is modified lastClusterVersion := old.(*ClusterVersion) if !reflect.DeepEqual(lastClusterVersion.Spec, r.Spec) { - return newInvalidError(ClusterVersionKind, r.Name, "", "ClusterVersion.spec is immutable, you can not update it.") + return nil, newInvalidError(ClusterVersionKind, r.Name, "", "ClusterVersion.spec is immutable, you can not update it.") } - return nil + return nil, nil } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ClusterVersion) ValidateDelete() error { +func (r *ClusterVersion) ValidateDelete() (admission.Warnings, error) { clusterversionlog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } // Validate ClusterVersion.spec is legal diff --git a/apis/apps/v1alpha1/opsrequest_webhook.go b/apis/apps/v1alpha1/opsrequest_webhook.go index 7655db8f903..a345a2f4592 100644 --- a/apis/apps/v1alpha1/opsrequest_webhook.go +++ b/apis/apps/v1alpha1/opsrequest_webhook.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 
"github.com/apecloud/kubeblocks/internal/constant" ) @@ -59,38 +60,38 @@ func (r *OpsRequest) SetupWebhookWithManager(mgr ctrl.Manager) error { var _ webhook.Validator = &OpsRequest{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *OpsRequest) ValidateCreate() error { +func (r *OpsRequest) ValidateCreate() (admission.Warnings, error) { opsRequestLog.Info("validate create", "name", r.Name) - return r.validateEntry(true) + return nil, r.validateEntry(true) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *OpsRequest) ValidateUpdate(old runtime.Object) error { +func (r *OpsRequest) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { opsRequestLog.Info("validate update", "name", r.Name) lastOpsRequest := old.(*OpsRequest).DeepCopy() // if no spec updated, we should skip validation. // if not, we can not delete the OpsRequest when cluster has been deleted. // because when cluster not existed, r.validate will report an error. if reflect.DeepEqual(lastOpsRequest.Spec, r.Spec) { - return nil + return nil, nil } if r.IsComplete() { - return fmt.Errorf("update OpsRequest: %s is forbidden when status.Phase is %s", r.Name, r.Status.Phase) + return nil, fmt.Errorf("update OpsRequest: %s is forbidden when status.Phase is %s", r.Name, r.Status.Phase) } // Keep the cancel consistent between the two opsRequest for comparing the diff. 
lastOpsRequest.Spec.Cancel = r.Spec.Cancel if !reflect.DeepEqual(lastOpsRequest.Spec, r.Spec) && r.Status.Phase != "" { - return fmt.Errorf("update OpsRequest: %s is forbidden except for cancel when status.Phase is %s", r.Name, r.Status.Phase) + return nil, fmt.Errorf("update OpsRequest: %s is forbidden except for cancel when status.Phase is %s", r.Name, r.Status.Phase) } - return r.validateEntry(false) + return nil, r.validateEntry(false) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *OpsRequest) ValidateDelete() error { +func (r *OpsRequest) ValidateDelete() (admission.Warnings, error) { opsRequestLog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } // IsComplete checks if opsRequest has been completed. diff --git a/apis/apps/v1alpha1/servicedescriptor_webhook.go b/apis/apps/v1alpha1/servicedescriptor_webhook.go index 84fc9caa630..4ca9f42e7af 100644 --- a/apis/apps/v1alpha1/servicedescriptor_webhook.go +++ b/apis/apps/v1alpha1/servicedescriptor_webhook.go @@ -27,6 +27,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. 
@@ -57,24 +58,24 @@ func (r *ServiceDescriptor) Default() { var _ webhook.Validator = &ServiceDescriptor{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ServiceDescriptor) ValidateCreate() error { +func (r *ServiceDescriptor) ValidateCreate() (admission.Warnings, error) { servicedescriptorlog.Info("validate create", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ServiceDescriptor) ValidateUpdate(old runtime.Object) error { +func (r *ServiceDescriptor) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { servicedescriptorlog.Info("validate update", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ServiceDescriptor) ValidateDelete() error { +func (r *ServiceDescriptor) ValidateDelete() (admission.Warnings, error) { servicedescriptorlog.Info("validate delete", "name", r.Name) - return r.validate() + return nil, r.validate() } func (r *ServiceDescriptor) validate() error { diff --git a/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go b/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go index 9247c1d3c55..52565c5ac05 100644 --- a/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go +++ b/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go @@ -27,6 +27,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. 
@@ -57,24 +58,24 @@ func (r *ReplicatedStateMachine) Default() { var _ webhook.Validator = &ReplicatedStateMachine{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateCreate() error { +func (r *ReplicatedStateMachine) ValidateCreate() (admission.Warnings, error) { replicatedstatemachinelog.Info("validate create", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateUpdate(old runtime.Object) error { +func (r *ReplicatedStateMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { replicatedstatemachinelog.Info("validate update", "name", r.Name) - return r.validate() + return nil, r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateDelete() error { +func (r *ReplicatedStateMachine) ValidateDelete() (admission.Warnings, error) { replicatedstatemachinelog.Info("validate delete", "name", r.Name) - return r.validate() + return nil, r.validate() } func (r *ReplicatedStateMachine) validate() error { diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 78697f0c221..9e687ab00e8 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -59,7 +59,6 @@ import ( "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" viper "github.com/apecloud/kubeblocks/internal/viperx" - "github.com/apecloud/kubeblocks/internal/webhook" ) // added lease.coordination.k8s.io for leader election @@ -457,11 +456,6 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "ServiceDescriptor") os.Exit(1) } - - if err = webhook.SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to setup webhook") - os.Exit(1) - } } if err := 
mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml index ba3e79f8e16..5b25e90ea08 100644 --- a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuppolicytemplates.apps.kubeblocks.io diff --git a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml index 8404c93bbb1..538dc69bc71 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusterdefinitions.apps.kubeblocks.io @@ -700,6 +699,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -810,10 +810,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -900,6 +902,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -963,6 +966,7 @@ spec: ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -1073,6 +1077,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The @@ -1131,6 +1136,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies @@ -1243,6 +1249,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -1306,6 +1313,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -1417,6 +1425,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The @@ -1475,6 +1484,7 @@ spec: only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies @@ -1594,6 +1604,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -1613,6 +1624,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -1640,6 +1652,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -1662,6 +1675,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1694,6 +1708,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -1712,6 +1727,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -1776,7 +1792,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -1884,7 +1904,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -1971,8 +1995,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2006,7 +2029,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2188,8 +2214,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2223,7 +2248,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2318,6 +2346,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -2367,10 +2417,33 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -2506,8 +2579,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -2544,17 +2618,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -2605,8 +2674,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2640,7 +2708,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -3009,6 +3080,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -3028,6 +3100,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -3055,6 +3128,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -3077,6 +3151,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3109,6 +3184,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -3127,6 +3203,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -3187,7 +3264,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -3295,7 +3376,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -3381,8 +3466,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -3416,7 +3500,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3589,8 +3676,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3624,7 +3710,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3719,6 +3808,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -3769,10 +3880,16 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: Restart policy for the container to manage + the restart behavior of each container within a + pod. This may only be set for init containers. You + cannot set this field on ephemeral containers. + type: string securityContext: description: 'Optional: SecurityContext defines the security options the ephemeral container should @@ -3908,8 +4025,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. type: string type: description: "type indicates which kind of @@ -3946,17 +4064,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. 
type: boolean runAsUserName: description: The UserName in Windows to run @@ -3999,8 +4112,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4034,7 +4146,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4331,6 +4446,7 @@ spec: uid?' type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: description: 'List of initialization containers belonging @@ -4430,6 +4546,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -4449,6 +4566,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -4476,6 +4594,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -4498,6 +4617,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4530,6 +4650,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. 
@@ -4548,6 +4669,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4612,7 +4734,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -4720,7 +4846,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -4807,8 +4937,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4842,7 +4971,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5024,8 +5156,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5059,7 +5190,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -5154,6 +5288,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -5203,10 +5359,33 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. 
+ Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -5342,8 +5521,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. type: string type: description: "type indicates which kind of @@ -5380,17 +5560,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. 
type: boolean runAsUserName: description: The UserName in Windows to run @@ -5441,8 +5616,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5476,7 +5650,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5832,18 +6009,13 @@ spec: will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. - The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. - Pod validation will reject the pod if the concatenated - name is not valid for a ResourceClaim (e.g. - too long). \n An existing ResourceClaim with - that name that is not owned by the pod will - not be used for the pod to avoid using an unrelated - resource by mistake. Scheduling and pod startup - are then blocked until the unrelated ResourceClaim - is removed. \n This field is immutable and no - changes will be made to the corresponding ResourceClaim - by the control plane after creating the ResourceClaim." + The pod name and resource name, along with a + generated component, will be used to form a + unique name for the ResourceClaim, which will + be recorded in pod.status.resourceClaimStatuses. + \n This field is immutable and no changes will + be made to the corresponding ResourceClaim by + the control plane after creating the ResourceClaim." type: string type: object required: @@ -5855,8 +6027,9 @@ spec: x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the - pod. One of Always, OnFailure, Never. Default to Always. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + pod. One of Always, OnFailure, Never. In some contexts, + only a subset of those values may be permitted. Default + to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string runtimeClassName: description: 'RuntimeClassName refers to a RuntimeClass @@ -5874,10 +6047,13 @@ spec: type: string schedulingGates: description: "SchedulingGates is an opaque list of values - that if specified will block scheduling the pod. More - info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. - \n This is an alpha-level feature enabled by PodSchedulingReadiness - feature gate." + that if specified will block scheduling the pod. If schedulingGates + is not empty, the pod will stay in the SchedulingGated + state and the scheduler will not attempt to schedule the + pod. \n SchedulingGates can only be set at pod creation + time, and be removed only afterwards. \n This is a beta + feature enabled by the PodSchedulingReadiness feature + gate." items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -5988,7 +6164,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must only be set if type is "Localhost". + Must be set if type is "Localhost". Must NOT be + set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -6058,15 +6235,11 @@ spec: type: string hostProcess: description: HostProcess determines if a container - should be run as a 'Host Process' container. This - field is alpha-level and will only be honored - by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the feature - flag will result in errors when validating the - Pod. 
All of a Pod's containers must have the same - effective HostProcess value (it is not allowed - to have a mix of HostProcess containers and non-HostProcess - containers). In addition, if HostProcess is true + should be run as a 'Host Process' container. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a + mix of HostProcess containers and non-HostProcess + containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: @@ -6228,16 +6401,22 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: - description: MatchLabelKeys is a set of pod label + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated - for the incoming pod. Keys that don't exist in the - incoming pod labels will be ignored. A null or empty - list means only match against labelSelector. + for the incoming pod. The same key is forbidden + to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector + isn't set. Keys that don't exist in the incoming + pod labels will be ignored. A null or empty list + means only match against labelSelector. \n This + is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." items: type: string type: array @@ -6506,6 +6685,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -6542,6 +6722,7 @@ spec: kind, uid?' 
type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -6623,6 +6804,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external @@ -6657,6 +6839,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -6716,6 +6899,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be @@ -6764,6 +6948,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -6792,7 +6977,7 @@ spec: specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -6918,6 +7103,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the @@ -7052,7 +7238,8 @@ spec: for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -7112,6 +7299,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by @@ -7213,6 +7401,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -7406,6 +7595,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if @@ -7593,6 +7783,7 @@ spec: defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -7625,6 +7816,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this @@ -7680,6 +7872,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -7755,6 +7948,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to @@ -7882,6 +8076,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -7927,6 +8122,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -8054,6 +8250,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names @@ -8927,6 +9124,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -8946,6 +9144,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and @@ -8974,6 +9173,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -8996,6 +9196,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9096,6 +9297,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -9115,6 +9317,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and @@ -9143,6 +9346,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -9165,6 +9369,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9347,6 +9552,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -9366,6 +9572,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -9392,6 +9599,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -9414,6 +9622,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic 
type: object required: - name diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index 3815b20abf1..f06624d15d3 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusters.apps.kubeblocks.io @@ -362,8 +361,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true @@ -622,8 +621,8 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true diff --git a/config/crd/bases/apps.kubeblocks.io_clusterversions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterversions.yaml index 77302045da9..3e9507ef482 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterversions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterversions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusterversions.apps.kubeblocks.io @@ -230,6 +229,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -249,6 +249,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -275,6 +276,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -297,6 +299,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -370,6 +373,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -389,6 +393,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -415,6 +420,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's 
namespace @@ -437,6 +443,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -545,6 +552,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -564,6 +572,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -591,6 +600,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -613,6 +623,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -645,6 +656,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -663,6 +675,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -727,7 +740,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -835,7 +852,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -922,8 +943,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. 
properties: port: description: Port number of the gRPC service. @@ -957,7 +977,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -1139,8 +1162,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -1174,7 +1196,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -1269,6 +1294,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -1318,10 +1365,33 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -1457,8 +1527,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -1495,17 +1566,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -1556,8 +1622,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -1591,7 +1656,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -1900,6 +1968,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -1919,6 +1988,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -1946,6 +2016,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -1968,6 +2039,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2000,6 +2072,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -2018,6 +2091,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -2082,7 +2156,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -2190,7 +2268,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -2277,8 +2359,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -2312,7 +2393,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2494,8 +2578,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2529,7 +2612,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2624,6 +2710,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -2673,10 +2781,33 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -2812,8 +2943,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -2850,17 +2982,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -2911,8 +3038,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2946,7 +3072,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value diff --git a/config/crd/bases/apps.kubeblocks.io_componentclassdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_componentclassdefinitions.yaml index 9827863faa8..84569625c77 100644 --- a/config/crd/bases/apps.kubeblocks.io_componentclassdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_componentclassdefinitions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: componentclassdefinitions.apps.kubeblocks.io diff --git a/config/crd/bases/apps.kubeblocks.io_componentresourceconstraints.yaml b/config/crd/bases/apps.kubeblocks.io_componentresourceconstraints.yaml index d96a325fe7a..0854049bfa6 100644 --- a/config/crd/bases/apps.kubeblocks.io_componentresourceconstraints.yaml +++ b/config/crd/bases/apps.kubeblocks.io_componentresourceconstraints.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: componentresourceconstraints.apps.kubeblocks.io diff --git a/config/crd/bases/apps.kubeblocks.io_configconstraints.yaml b/config/crd/bases/apps.kubeblocks.io_configconstraints.yaml index 1d9f4c48175..e705f33b453 100644 --- a/config/crd/bases/apps.kubeblocks.io_configconstraints.yaml +++ b/config/crd/bases/apps.kubeblocks.io_configconstraints.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: configconstraints.apps.kubeblocks.io 
@@ -95,6 +94,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and @@ -135,6 +135,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -371,6 +372,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic staticParameters: description: staticParameters, list of StaticParameter, modifications of them trigger a process restart. diff --git a/config/crd/bases/apps.kubeblocks.io_configurations.yaml b/config/crd/bases/apps.kubeblocks.io_configurations.yaml index 758fde16d99..627422ccfe9 100644 --- a/config/crd/bases/apps.kubeblocks.io_configurations.yaml +++ b/config/crd/bases/apps.kubeblocks.io_configurations.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: configurations.apps.kubeblocks.io diff --git a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml index de028334380..4c5da53a769 100644 --- a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml +++ b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: opsrequests.apps.kubeblocks.io @@ -363,6 +362,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: array x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.scriptFrom.configMapRef @@ -387,6 +387,7 @@ spec: required: - key type: object + 
x-kubernetes-map-type: atomic type: array x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.scriptFrom.secretRef @@ -467,6 +468,7 @@ spec: "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.script.selector rule: self == oldSelf @@ -616,7 +618,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object required: - componentName @@ -919,8 +922,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object services: description: services records the last services of the component. 
diff --git a/config/crd/bases/apps.kubeblocks.io_servicedescriptors.yaml b/config/crd/bases/apps.kubeblocks.io_servicedescriptors.yaml index 2f0dcd73083..653b4a30527 100644 --- a/config/crd/bases/apps.kubeblocks.io_servicedescriptors.yaml +++ b/config/crd/bases/apps.kubeblocks.io_servicedescriptors.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: servicedescriptors.apps.kubeblocks.io @@ -97,6 +96,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -114,6 +114,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -138,6 +139,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -157,6 +159,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object username: @@ -196,6 +199,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -213,6 +217,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -237,6 +242,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the 
pod's namespace properties: @@ -256,6 +262,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object type: object @@ -294,6 +301,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -311,6 +319,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, @@ -335,6 +344,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -353,6 +363,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object port: @@ -390,6 +401,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -407,6 +419,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, @@ -431,6 +444,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -449,6 +463,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object serviceKind: diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml index f3261eca785..660c40806d3 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml +++ 
b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuppolicies.dataprotection.kubeblocks.io @@ -217,6 +216,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -413,6 +413,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -628,6 +629,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml index 8d61dbb411b..00fcc2ab7a4 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backuprepos.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuprepos.dataprotection.kubeblocks.io @@ -76,6 +75,7 @@ spec: name must be unique. type: string type: object + x-kubernetes-map-type: atomic pvReclaimPolicy: description: The reclaim policy for the PV created by this backup repo. @@ -190,6 +190,7 @@ spec: name must be unique. 
type: string type: object + x-kubernetes-map-type: atomic generatedStorageClassName: description: generatedStorageClassName indicates the generated storage class name. diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml index 3bce53724b5..27fa8f29e72 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backups.dataprotection.kubeblocks.io diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml index 2bec3e71768..5244e2b2203 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuptools.dataprotection.kubeblocks.io @@ -94,6 +93,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -111,6 +111,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -135,6 +136,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: 
Selects a key of a secret in the pod's namespace properties: @@ -153,6 +155,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -180,6 +183,7 @@ spec: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -195,6 +199,7 @@ spec: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array x-kubernetes-preserve-unknown-fields: true @@ -299,7 +304,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true diff --git a/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml b/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml index 917f6a6390f..12cdf01405b 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: restorejobs.dataprotection.kubeblocks.io @@ -105,6 +104,7 @@ spec: "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -301,6 +301,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -332,6 +333,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -403,6 +405,7 @@ spec: keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta @@ -433,6 +436,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -486,6 +490,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 @@ -529,6 +534,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -554,7 +560,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -665,6 +671,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if @@ -783,8 +790,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -838,6 +845,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. More info: @@ -928,6 +936,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -1105,6 +1114,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than @@ -1275,6 +1285,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -1304,6 +1315,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be @@ -1352,6 +1364,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -1417,6 +1430,7 @@ spec: Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project @@ -1533,6 +1547,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -1572,6 +1587,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -1687,6 +1703,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within diff --git a/config/crd/bases/extensions.kubeblocks.io_addons.yaml b/config/crd/bases/extensions.kubeblocks.io_addons.yaml index 6c45a4f8f22..80f4c9f946b 100644 --- a/config/crd/bases/extensions.kubeblocks.io_addons.yaml +++ b/config/crd/bases/extensions.kubeblocks.io_addons.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: addons.extensions.kubeblocks.io diff --git a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml index e53ece23fab..5c2b75b65b3 100644 --- a/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml +++ b/config/crd/bases/storage.kubeblocks.io_storageproviders.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: storageproviders.storage.kubeblocks.io diff --git a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml index 94fbb5bca94..6e84668baf6 100644 --- a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: replicatedstatemachines.workloads.kubeblocks.io @@ -287,11 +286,10 @@ spec: supports specifying the loadBalancerIP when a 
load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future - API version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -310,12 +308,21 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. 
@@ -630,6 +637,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -647,6 +655,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -671,6 +680,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -690,6 +700,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object username: @@ -728,6 +739,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -745,6 +757,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -769,6 +782,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -788,6 +802,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object required: @@ -1072,6 +1087,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic service: description: service defines the behavior of a service spec. provides read-write service https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -1293,11 +1309,10 @@ spec: supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future API - version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -1316,12 +1331,21 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names (as + per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + * 'kubernetes.io/ws' - WebSocket over cleartext as + described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. 
@@ -1738,6 +1762,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -1844,10 +1869,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -1932,6 +1959,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -1993,6 +2021,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2102,6 +2131,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -2159,6 +2189,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. @@ -2268,6 +2299,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -2329,6 +2361,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2438,6 +2471,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -2495,6 +2529,7 @@ spec: only "value". 
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. @@ -2611,6 +2646,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -2630,6 +2666,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -2656,6 +2693,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -2678,6 +2716,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2710,6 +2749,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -2728,6 +2768,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -2790,7 +2831,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -2894,7 +2938,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -2979,8 +3026,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3014,7 +3060,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3191,8 +3240,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3226,7 +3274,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3318,6 +3369,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -3367,9 +3440,32 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" for the + init container will have the following effect: this + init container will be continually restarted on exit + until all regular containers have terminated. Once + all regular containers have completed, all init containers + with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although + this init container still starts in the init container + sequence, it does not wait for the container to complete + before proceeding to the next init container. Instead, + the next init container starts immediately after this + init container is started, or after any startupProbe + has successfully completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, the fields @@ -3499,7 +3595,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. 
type: string type: description: "type indicates which kind of seccomp @@ -3535,14 +3632,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -3593,8 +3686,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3628,7 +3720,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -3990,6 +4085,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -4009,6 +4105,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -4035,6 +4132,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -4057,6 +4155,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4089,6 +4188,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -4107,6 +4207,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4165,7 +4266,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -4269,7 +4373,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -4352,8 +4459,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -4387,7 +4493,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4555,8 +4664,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4590,7 +4698,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4682,6 +4793,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -4732,9 +4865,16 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: Restart policy for the container to manage + the restart behavior of each container within a pod. + This may only be set for init containers. You cannot + set this field on ephemeral containers. + type: string securityContext: description: 'Optional: SecurityContext defines the security options the ephemeral container should be @@ -4864,7 +5004,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -4900,14 +5041,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -4950,8 +5087,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4985,7 +5121,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5276,6 +5415,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: description: 'List of initialization containers belonging @@ -5373,6 +5513,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -5392,6 +5533,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -5418,6 +5560,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -5440,6 +5583,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -5472,6 +5616,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -5490,6 +5635,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -5552,7 +5698,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. 
type: string value: description: The header field value @@ -5656,7 +5805,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -5741,8 +5893,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5776,7 +5927,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5953,8 +6107,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5988,7 +6141,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -6080,6 +6236,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' 
+ type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -6129,9 +6307,32 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" for the + init container will have the following effect: this + init container will be continually restarted on exit + until all regular containers have terminated. Once + all regular containers have completed, all init containers + with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although + this init container still starts in the init container + sequence, it does not wait for the container to complete + before proceeding to the next init container. 
Instead, + the next init container starts immediately after this + init container is started, or after any startupProbe + has successfully completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, the fields @@ -6261,7 +6462,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -6297,14 +6499,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -6355,8 +6553,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -6390,7 +6587,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. 
This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -6738,19 +6938,14 @@ spec: namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, - the ResourceClaim will also be deleted. The name - of the ResourceClaim will be -, where is the PodResourceClaim.Name. - Pod validation will reject the pod if the concatenated - name is not valid for a ResourceClaim (e.g. too - long). \n An existing ResourceClaim with that - name that is not owned by the pod will not be - used for the pod to avoid using an unrelated resource - by mistake. Scheduling and pod startup are then - blocked until the unrelated ResourceClaim is removed. - \n This field is immutable and no changes will - be made to the corresponding ResourceClaim by - the control plane after creating the ResourceClaim." + the ResourceClaim will also be deleted. The pod + name and resource name, along with a generated + component, will be used to form a unique name + for the ResourceClaim, which will be recorded + in pod.status.resourceClaimStatuses. \n This field + is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane + after creating the ResourceClaim." type: string type: object required: @@ -6762,8 +6957,9 @@ spec: x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the - pod. One of Always, OnFailure, Never. Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + pod. One of Always, OnFailure, Never. In some contexts, + only a subset of those values may be permitted. Default + to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string runtimeClassName: description: 'RuntimeClassName refers to a RuntimeClass object @@ -6781,10 +6977,12 @@ spec: type: string schedulingGates: description: "SchedulingGates is an opaque list of values - that if specified will block scheduling the pod. More info: - \ https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. - \n This is an alpha-level feature enabled by PodSchedulingReadiness - feature gate." + that if specified will block scheduling the pod. If schedulingGates + is not empty, the pod will stay in the SchedulingGated state + and the scheduler will not attempt to schedule the pod. + \n SchedulingGates can only be set at pod creation time, + and be removed only afterwards. \n This is a beta feature + enabled by the PodSchedulingReadiness feature gate." items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -6894,8 +7092,9 @@ spec: defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's - configured seccomp profile location. Must only be - set if type is "Localhost". + configured seccomp profile location. Must be set + if type is "Localhost". Must NOT be set for any + other type. type: string type: description: "type indicates which kind of seccomp @@ -6963,15 +7162,11 @@ spec: type: string hostProcess: description: HostProcess determines if a container - should be run as a 'Host Process' container. This - field is alpha-level and will only be honored by - components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the feature - flag will result in errors when validating the Pod. - All of a Pod's containers must have the same effective + should be run as a 'Host Process' container. 
All + of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix - of HostProcess containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork + of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: @@ -7131,16 +7326,21 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: - description: MatchLabelKeys is a set of pod label keys + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming - pod. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. + pod. The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be set when + LabelSelector isn't set. Keys that don't exist in + the incoming pod labels will be ignored. A null or + empty list means only match against labelSelector. + \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." items: type: string type: array @@ -7400,6 +7600,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -7435,6 +7636,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -7514,6 +7716,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external @@ -7547,6 +7750,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -7605,6 +7809,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an @@ -7650,6 +7855,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -7677,7 +7883,7 @@ spec: be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that - the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -7801,6 +8007,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume @@ -7932,7 +8139,8 @@ spec: a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + Requests cannot exceed Limits. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -7990,6 +8198,7 @@ spec: ANDed. 
type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. @@ -8088,6 +8297,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -8278,6 +8488,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if @@ -8460,6 +8671,7 @@ spec: the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -8491,6 +8703,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this @@ -8546,6 +8759,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -8617,6 +8831,7 @@ spec: the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project @@ -8743,6 +8958,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -8787,6 +9003,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -8912,6 +9129,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names are only @@ -9085,6 +9303,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume @@ -9192,7 +9411,7 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -9241,6 +9460,7 @@ spec: contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' @@ -9265,6 +9485,50 @@ spec: items: type: string type: array + allocatedResourceStatuses: + additionalProperties: + description: When a controller receives persistentvolume + claim update with ClaimResourceStatus for a resource + that it does not recognizes, then it should ignore that + update and let other controllers handle it. + type: string + description: "allocatedResourceStatuses stores status of + resource being resized for the given PVC. Key names follow + standard Kubernetes label syntax. Valid values are either: + * Un-prefixed keys: - storage - the capacity of the volume. + * Custom resources must use implementation-defined prefixed + names such as \"example.com/my-custom-resource\" Apart + from above values - keys that are unprefixed or have kubernetes.io + prefix are considered reserved and hence may not be used. 
+ \n ClaimResourceStatus can be in any of following states: + - ControllerResizeInProgress: State set when resize controller + starts resizing the volume in control-plane. - ControllerResizeFailed: + State set when resize has failed in resize controller + with a terminal error. - NodeResizePending: State set + when resize controller has finished resizing the volume + but further resizing of volume is needed on the node. + - NodeResizeInProgress: State set when kubelet starts + resizing the volume. - NodeResizeFailed: State set when + resizing has failed in kubelet with a terminal error. + Transient errors don't set NodeResizeFailed. For example: + if expanding a PVC for more capacity - this field can + be one of the following states: - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeFailed\" When this field is not set, it + means that no resize operation is in progress for the + given PVC. \n A controller that receives PVC update with + previously unknown resourceName or ClaimResourceStatus + should ignore the update for the purpose it was designed. + For example - a controller that only is responsible for + resizing capacity of the volume, should ignore PVC updates + that change other valid resources associated with PVC. + \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure + feature." 
+ type: object + x-kubernetes-map-type: granular allocatedResources: additionalProperties: anyOf: @@ -9272,18 +9536,30 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: allocatedResources is the storage resource - within AllocatedResources tracks the capacity allocated - to a PVC. It may be larger than the actual capacity when - a volume expansion operation is requested. For storage - quota, the larger value from allocatedResources and PVC.spec.resources - is used. If allocatedResources is not set, PVC.spec.resources - alone is used for quota calculation. If a volume expansion - capacity request is lowered, allocatedResources is only - lowered if there are no expansion operations in progress - and if the actual volume capacity is equal or lower than - the requested capacity. This is an alpha field and requires - enabling RecoverVolumeExpansionFailure feature. + description: "allocatedResources tracks the resources allocated + to a PVC including its capacity. Key names follow standard + Kubernetes label syntax. Valid values are either: * Un-prefixed + keys: - storage - the capacity of the volume. * Custom + resources must use implementation-defined prefixed names + such as \"example.com/my-custom-resource\" Apart from + above values - keys that are unprefixed or have kubernetes.io + prefix are considered reserved and hence may not be used. + \n Capacity reported here may be larger than the actual + capacity when a volume expansion operation is requested. + For storage quota, the larger value from allocatedResources + and PVC.spec.resources is used. If allocatedResources + is not set, PVC.spec.resources alone is used for quota + calculation. 
If a volume expansion capacity request is + lowered, allocatedResources is only lowered if there are + no expansion operations in progress and if the actual + volume capacity is equal or lower than the requested capacity. + \n A controller that receives PVC update with previously + unknown resourceName should ignore the update for the + purpose it was designed. For example - a controller that + only is responsible for resizing capacity of the volume, + should ignore PVC updates that change other valid resources + associated with PVC. \n This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature." type: object capacity: additionalProperties: @@ -9300,7 +9576,7 @@ spec: volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. items: - description: PersistentVolumeClaimCondition contails details + description: PersistentVolumeClaimCondition contains details about state of pvc properties: lastProbeTime: @@ -9338,13 +9614,6 @@ spec: phase: description: phase represents the current phase of PersistentVolumeClaim. type: string - resizeStatus: - description: resizeStatus stores status of resize operation. - ResizeStatus is not set by default but when expansion - is complete resizeStatus is set to empty string by resize - controller or kubelet. This is an alpha field and requires - enabling RecoverVolumeExpansionFailure feature. 
- type: string type: object type: object type: array diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e43058fbb5a..716b0ea9ce9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 5c03360ba33..562dc0d1d77 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -69,7 +68,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index 522f8b66c35..ca41cbc5883 100644 --- a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -36,7 +36,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -230,22 +229,22 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&dataprotectionv1alpha1.BackupPolicy{}). Owns(&dataprotectionv1alpha1.Backup{}). Owns(&batchv1.Job{}). 
- Watches(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)) + Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)) if viper.GetBool(constant.EnableRBACManager) { b.Owns(&rbacv1.ClusterRoleBinding{}). Owns(&rbacv1.RoleBinding{}). Owns(&corev1.ServiceAccount{}) } else { - b.Watches(&source.Kind{Type: &rbacv1.ClusterRoleBinding{}}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)). - Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)). - Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)) + b.Watches(&rbacv1.ClusterRoleBinding{}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)). + Watches(&rbacv1.RoleBinding{}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)). + Watches(&corev1.ServiceAccount{}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)) } return b.Complete(r) } -func (r *ClusterReconciler) filterClusterResources(obj client.Object) []reconcile.Request { +func (r *ClusterReconciler) filterClusterResources(ctx context.Context, obj client.Object) []reconcile.Request { labels := obj.GetLabels() if v, ok := labels[constant.AppManagedByLabelKey]; !ok || v != constant.AppName { return []reconcile.Request{} diff --git a/controllers/apps/components/types.go b/controllers/apps/components/types.go index 2cb046ad906..5c21d0629a3 100644 --- a/controllers/apps/components/types.go +++ b/controllers/apps/components/types.go @@ -25,14 +25,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/class" "github.com/apecloud/kubeblocks/internal/constant" types2 "github.com/apecloud/kubeblocks/internal/controller/client" + "github.com/apecloud/kubeblocks/internal/controller/component" 
"github.com/apecloud/kubeblocks/internal/controller/graph" "github.com/apecloud/kubeblocks/internal/controller/plan" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/component" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) diff --git a/controllers/apps/opsrequest_controller.go b/controllers/apps/opsrequest_controller.go index 5340d1c709e..024daf5a7bb 100644 --- a/controllers/apps/opsrequest_controller.go +++ b/controllers/apps/opsrequest_controller.go @@ -37,7 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -84,8 +83,8 @@ func (r *OpsRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) func (r *OpsRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&appsv1alpha1.OpsRequest{}). - Watches(&source.Kind{Type: &appsv1alpha1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(r.parseAllOpsRequest)). - Watches(&source.Kind{Type: &dataprotectionv1alpha1.Backup{}}, handler.EnqueueRequestsFromMapFunc(r.parseBackupOpsRequest)). + Watches(&appsv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.parseAllOpsRequest)). + Watches(&dataprotectionv1alpha1.Backup{}, handler.EnqueueRequestsFromMapFunc(r.parseBackupOpsRequest)). 
Complete(r) } @@ -285,7 +284,7 @@ func (r *OpsRequestReconciler) handleOpsReqDeletedDuringRunning(reqCtx intctrlut return nil } -func (r *OpsRequestReconciler) parseAllOpsRequest(object client.Object) []reconcile.Request { +func (r *OpsRequestReconciler) parseAllOpsRequest(ctx context.Context, object client.Object) []reconcile.Request { cluster := object.(*appsv1alpha1.Cluster) var ( opsRequestSlice []appsv1alpha1.OpsRecorder @@ -306,7 +305,7 @@ func (r *OpsRequestReconciler) parseAllOpsRequest(object client.Object) []reconc return requests } -func (r *OpsRequestReconciler) parseBackupOpsRequest(object client.Object) []reconcile.Request { +func (r *OpsRequestReconciler) parseBackupOpsRequest(ctx context.Context, object client.Object) []reconcile.Request { backup := object.(*dataprotectionv1alpha1.Backup) var ( requests []reconcile.Request diff --git a/controllers/apps/systemaccount_controller.go b/controllers/apps/systemaccount_controller.go index 2a4b47e0833..c9ffb25a0e6 100644 --- a/controllers/apps/systemaccount_controller.go +++ b/controllers/apps/systemaccount_controller.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/source" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" opsutil "github.com/apecloud/kubeblocks/controllers/apps/operations/util" @@ -292,7 +291,7 @@ func (r *SystemAccountReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&appsv1alpha1.Cluster{}). Owns(&corev1.Secret{}). - Watches(&source.Kind{Type: &batchv1.Job{}}, r.jobCompletionHandler()). + Watches(&batchv1.Job{}, r.jobCompletionHandler()). Complete(r) } @@ -469,7 +468,7 @@ func (r *SystemAccountReconciler) jobCompletionHandler() *handler.Funcs { // 2. has completed (either successed or failed) // 3. 
is under deletion (either by user or by TTL, where deletionTimestamp is set) return &handler.Funcs{ - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { var ( jobTerminated = false job *batchv1.Job diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index e64083fe855..a73eef996aa 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -54,7 +54,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -154,7 +153,7 @@ func (r *BackupReconciler) SetupWithManager(mgr ctrl.Manager) error { }). Owns(&batchv1.Job{}). Owns(&appsv1.StatefulSet{}). 
- Watches(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(r.filterBackupPods)) + Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(r.filterBackupPods)) if viper.GetBool("VOLUMESNAPSHOT") { if intctrlutil.InVolumeSnapshotV1Beta1() { @@ -249,7 +248,7 @@ func (r *BackupReconciler) handleBackupDeletion(reqCtx intctrlutil.RequestCtx, b return nil, nil } -func (r *BackupReconciler) filterBackupPods(obj client.Object) []reconcile.Request { +func (r *BackupReconciler) filterBackupPods(ctx context.Context, obj client.Object) []reconcile.Request { labels := obj.GetLabels() if v, ok := labels[constant.AppManagedByLabelKey]; !ok || v != constant.AppName { return []reconcile.Request{} diff --git a/controllers/dataprotection/backuppolicy_controller.go b/controllers/dataprotection/backuppolicy_controller.go index a8a26ee7530..4e2e6e8df8e 100644 --- a/controllers/dataprotection/backuppolicy_controller.go +++ b/controllers/dataprotection/backuppolicy_controller.go @@ -47,7 +47,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -129,7 +128,7 @@ func (r *BackupPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request func (r *BackupPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dataprotectionv1alpha1.BackupPolicy{}). - Watches(&source.Kind{Type: &dataprotectionv1alpha1.Backup{}}, r.backupDeleteHandler(), + Watches(&dataprotectionv1alpha1.Backup{}, r.backupDeleteHandler(), builder.WithPredicates(predicate.NewPredicateFuncs(filterCreatedByPolicy))). 
WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), @@ -139,9 +138,8 @@ func (r *BackupPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *BackupPolicyReconciler) backupDeleteHandler() *handler.Funcs { return &handler.Funcs{ - DeleteFunc: func(event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) { + DeleteFunc: func(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) { backup := event.Object.(*dataprotectionv1alpha1.Backup) - ctx := context.Background() backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} if err := r.Client.Get(ctx, types.NamespacedName{Name: backup.Spec.BackupPolicyName, Namespace: backup.Namespace}, backupPolicy); err != nil { return diff --git a/controllers/dataprotection/backuprepo_controller.go b/controllers/dataprotection/backuprepo_controller.go index 3aef46b42bf..6a2241e395a 100644 --- a/controllers/dataprotection/backuprepo_controller.go +++ b/controllers/dataprotection/backuprepo_controller.go @@ -49,7 +49,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" @@ -867,7 +866,7 @@ func (r *BackupRepoReconciler) deleteSecrets(reqCtx intctrlutil.RequestCtx, repo return nil } -func (r *BackupRepoReconciler) mapBackupToRepo(obj client.Object) []ctrl.Request { +func (r *BackupRepoReconciler) mapBackupToRepo(ctx context.Context, obj client.Object) []ctrl.Request { backup := obj.(*dpv1alpha1.Backup) repoName, ok := backup.Labels[dataProtectionBackupRepoKey] if !ok { @@ -890,11 +889,11 @@ func (r *BackupRepoReconciler) mapBackupToRepo(obj client.Object) []ctrl.Request return nil } -func (r *BackupRepoReconciler) 
mapProviderToRepos(obj client.Object) []ctrl.Request { +func (r *BackupRepoReconciler) mapProviderToRepos(ctx context.Context, obj client.Object) []ctrl.Request { return r.providerRefMapper.mapToRequests(obj) } -func (r *BackupRepoReconciler) mapSecretToRepos(obj client.Object) []ctrl.Request { +func (r *BackupRepoReconciler) mapSecretToRepos(ctx context.Context, obj client.Object) []ctrl.Request { // check if the secret is created by this controller owner := metav1.GetControllerOf(obj) if owner != nil { @@ -917,12 +916,9 @@ func (r *BackupRepoReconciler) mapSecretToRepos(obj client.Object) []ctrl.Reques func (r *BackupRepoReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dpv1alpha1.BackupRepo{}). - Watches(&source.Kind{Type: &storagev1alpha1.StorageProvider{}}, - handler.EnqueueRequestsFromMapFunc(r.mapProviderToRepos)). - Watches(&source.Kind{Type: &dpv1alpha1.Backup{}}, - handler.EnqueueRequestsFromMapFunc(r.mapBackupToRepo)). - Watches(&source.Kind{Type: &corev1.Secret{}}, - handler.EnqueueRequestsFromMapFunc(r.mapSecretToRepos)). + Watches(&storagev1alpha1.StorageProvider{}, handler.EnqueueRequestsFromMapFunc(r.mapProviderToRepos)). + Watches(&dpv1alpha1.Backup{}, handler.EnqueueRequestsFromMapFunc(r.mapBackupToRepo)). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.mapSecretToRepos)). Owns(&storagev1.StorageClass{}). Owns(&corev1.PersistentVolumeClaim{}). 
Complete(r) diff --git a/controllers/extensions/addon_controller.go b/controllers/extensions/addon_controller.go index 496f2660b0a..8622bf68663 100644 --- a/controllers/extensions/addon_controller.go +++ b/controllers/extensions/addon_controller.go @@ -36,7 +36,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" @@ -147,14 +146,14 @@ func (r *AddonReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl func (r *AddonReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&extensionsv1alpha1.Addon{}). - Watches(&source.Kind{Type: &batchv1.Job{}}, handler.EnqueueRequestsFromMapFunc(r.findAddonJobs)). + Watches(&batchv1.Job{}, handler.EnqueueRequestsFromMapFunc(r.findAddonJobs)). WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(maxConcurrentReconcilesKey), }). 
Complete(r) } -func (r *AddonReconciler) findAddonJobs(job client.Object) []reconcile.Request { +func (r *AddonReconciler) findAddonJobs(ctx context.Context, job client.Object) []reconcile.Request { labels := job.GetLabels() if _, ok := labels[constant.AddonNameLabelKey]; !ok { return []reconcile.Request{} diff --git a/controllers/storage/storageprovider_controller.go b/controllers/storage/storageprovider_controller.go index 3109358c152..8727be241a7 100644 --- a/controllers/storage/storageprovider_controller.go +++ b/controllers/storage/storageprovider_controller.go @@ -33,7 +33,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" @@ -194,8 +193,8 @@ func (r *StorageProviderReconciler) deleteExternalResources( func (r *StorageProviderReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&storagev1alpha1.StorageProvider{}). 
- Watches(&source.Kind{Type: &storagev1.CSIDriver{}}, - handler.EnqueueRequestsFromMapFunc(func(object client.Object) []reconcile.Request { + Watches(&storagev1.CSIDriver{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request { r.mu.Lock() defer r.mu.Unlock() driverName := object.GetName() diff --git a/controllers/workloads/replicatedstatemachine_controller.go b/controllers/workloads/replicatedstatemachine_controller.go index 350484f4a47..47dee121b90 100644 --- a/controllers/workloads/replicatedstatemachine_controller.go +++ b/controllers/workloads/replicatedstatemachine_controller.go @@ -30,7 +30,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/source" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" @@ -151,9 +150,9 @@ func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager) er return ctrl.NewControllerManagedBy(mgr). For(&workloads.ReplicatedStateMachine{}). - Watches(&source.Kind{Type: &appsv1.StatefulSet{}}, stsHandler). - Watches(&source.Kind{Type: &batchv1.Job{}}, jobHandler). - Watches(&source.Kind{Type: &corev1.Pod{}}, podHandler). + Watches(&appsv1.StatefulSet{}, stsHandler). + Watches(&batchv1.Job{}, jobHandler). + Watches(&corev1.Pod{}, podHandler). Complete(r) } @@ -164,6 +163,6 @@ func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager) er For(&workloads.ReplicatedStateMachine{}). Owns(&appsv1.StatefulSet{}). Owns(&batchv1.Job{}). - Watches(&source.Kind{Type: &corev1.Pod{}}, podHandler). + Watches(&corev1.Pod{}, podHandler). 
Complete(r) } diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index e43058fbb5a..716b0ea9ce9 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: diff --git a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml index ba3e79f8e16..5b25e90ea08 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuppolicytemplates.apps.kubeblocks.io diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml index 8404c93bbb1..538dc69bc71 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusterdefinitions.apps.kubeblocks.io @@ -700,6 +699,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -810,10 +810,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic 
type: object podAffinity: description: Describes pod affinity scheduling rules @@ -900,6 +902,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -963,6 +966,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -1073,6 +1077,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The @@ -1131,6 +1136,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies @@ -1243,6 +1249,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -1306,6 +1313,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -1417,6 +1425,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The @@ -1475,6 +1484,7 @@ spec: only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies @@ -1594,6 +1604,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -1613,6 +1624,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -1640,6 +1652,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -1662,6 +1675,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1694,6 +1708,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -1712,6 +1727,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -1776,7 +1792,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -1884,7 +1904,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -1971,8 +1995,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2006,7 +2029,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2188,8 +2214,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2223,7 +2248,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2318,6 +2346,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -2367,10 +2417,33 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -2506,8 +2579,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -2544,17 +2618,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -2605,8 +2674,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2640,7 +2708,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -3009,6 +3080,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -3028,6 +3100,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -3055,6 +3128,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -3077,6 +3151,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3109,6 +3184,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -3127,6 +3203,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -3187,7 +3264,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -3295,7 +3376,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -3381,8 +3466,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -3416,7 +3500,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3589,8 +3676,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3624,7 +3710,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3719,6 +3808,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -3769,10 +3880,16 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: Restart policy for the container to manage + the restart behavior of each container within a + pod. This may only be set for init containers. You + cannot set this field on ephemeral containers. + type: string securityContext: description: 'Optional: SecurityContext defines the security options the ephemeral container should @@ -3908,8 +4025,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. type: string type: description: "type indicates which kind of @@ -3946,17 +4064,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. 
type: boolean runAsUserName: description: The UserName in Windows to run @@ -3999,8 +4112,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4034,7 +4146,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4331,6 +4446,7 @@ spec: uid?' type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: description: 'List of initialization containers belonging @@ -4430,6 +4546,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -4449,6 +4566,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -4476,6 +4594,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -4498,6 +4617,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4530,6 +4650,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. 
@@ -4548,6 +4669,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4612,7 +4734,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -4720,7 +4846,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -4807,8 +4937,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4842,7 +4971,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5024,8 +5156,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5059,7 +5190,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -5154,6 +5288,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -5203,10 +5359,33 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. 
+ Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -5342,8 +5521,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. type: string type: description: "type indicates which kind of @@ -5380,17 +5560,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. 
type: boolean runAsUserName: description: The UserName in Windows to run @@ -5441,8 +5616,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5476,7 +5650,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5832,18 +6009,13 @@ spec: will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. - The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. - Pod validation will reject the pod if the concatenated - name is not valid for a ResourceClaim (e.g. - too long). \n An existing ResourceClaim with - that name that is not owned by the pod will - not be used for the pod to avoid using an unrelated - resource by mistake. Scheduling and pod startup - are then blocked until the unrelated ResourceClaim - is removed. \n This field is immutable and no - changes will be made to the corresponding ResourceClaim - by the control plane after creating the ResourceClaim." + The pod name and resource name, along with a + generated component, will be used to form a + unique name for the ResourceClaim, which will + be recorded in pod.status.resourceClaimStatuses. + \n This field is immutable and no changes will + be made to the corresponding ResourceClaim by + the control plane after creating the ResourceClaim." type: string type: object required: @@ -5855,8 +6027,9 @@ spec: x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the - pod. One of Always, OnFailure, Never. Default to Always. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + pod. One of Always, OnFailure, Never. In some contexts, + only a subset of those values may be permitted. Default + to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string runtimeClassName: description: 'RuntimeClassName refers to a RuntimeClass @@ -5874,10 +6047,13 @@ spec: type: string schedulingGates: description: "SchedulingGates is an opaque list of values - that if specified will block scheduling the pod. More - info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. - \n This is an alpha-level feature enabled by PodSchedulingReadiness - feature gate." + that if specified will block scheduling the pod. If schedulingGates + is not empty, the pod will stay in the SchedulingGated + state and the scheduler will not attempt to schedule the + pod. \n SchedulingGates can only be set at pod creation + time, and be removed only afterwards. \n This is a beta + feature enabled by the PodSchedulingReadiness feature + gate." items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -5988,7 +6164,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must only be set if type is "Localhost". + Must be set if type is "Localhost". Must NOT be + set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -6058,15 +6235,11 @@ spec: type: string hostProcess: description: HostProcess determines if a container - should be run as a 'Host Process' container. This - field is alpha-level and will only be honored - by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the feature - flag will result in errors when validating the - Pod. 
All of a Pod's containers must have the same - effective HostProcess value (it is not allowed - to have a mix of HostProcess containers and non-HostProcess - containers). In addition, if HostProcess is true + should be run as a 'Host Process' container. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a + mix of HostProcess containers and non-HostProcess + containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: @@ -6228,16 +6401,22 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: - description: MatchLabelKeys is a set of pod label + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated - for the incoming pod. Keys that don't exist in the - incoming pod labels will be ignored. A null or empty - list means only match against labelSelector. + for the incoming pod. The same key is forbidden + to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector + isn't set. Keys that don't exist in the incoming + pod labels will be ignored. A null or empty list + means only match against labelSelector. \n This + is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." items: type: string type: array @@ -6506,6 +6685,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -6542,6 +6722,7 @@ spec: kind, uid?' 
type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -6623,6 +6804,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external @@ -6657,6 +6839,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -6716,6 +6899,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be @@ -6764,6 +6948,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -6792,7 +6977,7 @@ spec: specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -6918,6 +7103,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the @@ -7052,7 +7238,8 @@ spec: for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -7112,6 +7299,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by @@ -7213,6 +7401,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -7406,6 +7595,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if @@ -7593,6 +7783,7 @@ spec: defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -7625,6 +7816,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this @@ -7680,6 +7872,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -7755,6 +7948,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to @@ -7882,6 +8076,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -7927,6 +8122,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -8054,6 +8250,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names @@ -8927,6 +9124,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -8946,6 +9144,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and @@ -8974,6 +9173,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -8996,6 +9196,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9096,6 +9297,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -9115,6 +9317,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and @@ -9143,6 +9346,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -9165,6 +9369,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9347,6 +9552,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -9366,6 +9572,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -9392,6 +9599,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -9414,6 +9622,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic 
type: object required: - name diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index 3815b20abf1..f06624d15d3 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusters.apps.kubeblocks.io @@ -362,8 +361,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true @@ -622,8 +621,8 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterversions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterversions.yaml index 77302045da9..3e9507ef482 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterversions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterversions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: clusterversions.apps.kubeblocks.io @@ -230,6 +229,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -249,6 +249,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -275,6 +276,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -297,6 +299,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -370,6 +373,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -389,6 +393,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -415,6 +420,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's 
namespace @@ -437,6 +443,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -545,6 +552,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -564,6 +572,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -591,6 +600,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -613,6 +623,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -645,6 +656,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -663,6 +675,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -727,7 +740,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -835,7 +852,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -922,8 +943,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. 
properties: port: description: Port number of the gRPC service. @@ -957,7 +977,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -1139,8 +1162,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -1174,7 +1196,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -1269,6 +1294,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -1318,10 +1365,33 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -1457,8 +1527,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -1495,17 +1566,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -1556,8 +1622,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -1591,7 +1656,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -1900,6 +1968,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -1919,6 +1988,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests @@ -1946,6 +2016,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -1968,6 +2039,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2000,6 +2072,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -2018,6 +2091,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -2082,7 +2156,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -2190,7 +2268,11 @@ spec: custom header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. type: string value: description: The header field value @@ -2277,8 +2359,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -2312,7 +2393,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2494,8 +2578,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2529,7 +2612,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -2624,6 +2710,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -2673,10 +2781,33 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, @@ -2812,8 +2943,9 @@ spec: be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured - seccomp profile location. Must only be set - if type is "Localhost". + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. 
type: string type: description: "type indicates which kind of @@ -2850,17 +2982,12 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only - be honored by components that enable the - WindowsHostProcessContainers feature flag. - Setting this field without the feature flag - will result in errors when validating the - Pod. All of a Pod's containers must have - the same effective HostProcess value (it - is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork - must also be set to true. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. type: boolean runAsUserName: description: The UserName in Windows to run @@ -2911,8 +3038,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -2946,7 +3072,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. 
type: string value: description: The header field value diff --git a/deploy/helm/crds/apps.kubeblocks.io_componentclassdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_componentclassdefinitions.yaml index 9827863faa8..84569625c77 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_componentclassdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_componentclassdefinitions.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: componentclassdefinitions.apps.kubeblocks.io diff --git a/deploy/helm/crds/apps.kubeblocks.io_componentresourceconstraints.yaml b/deploy/helm/crds/apps.kubeblocks.io_componentresourceconstraints.yaml index d96a325fe7a..0854049bfa6 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_componentresourceconstraints.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_componentresourceconstraints.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: componentresourceconstraints.apps.kubeblocks.io diff --git a/deploy/helm/crds/apps.kubeblocks.io_configconstraints.yaml b/deploy/helm/crds/apps.kubeblocks.io_configconstraints.yaml index 1d9f4c48175..e705f33b453 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_configconstraints.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_configconstraints.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: configconstraints.apps.kubeblocks.io 
@@ -95,6 +94,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and @@ -135,6 +135,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -371,6 +372,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic staticParameters: description: staticParameters, list of StaticParameter, modifications of them trigger a process restart. diff --git a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml index 758fde16d99..627422ccfe9 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: configurations.apps.kubeblocks.io diff --git a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml index de028334380..4c5da53a769 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: opsrequests.apps.kubeblocks.io @@ -363,6 +362,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: array x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.scriptFrom.configMapRef @@ -387,6 +387,7 @@ spec: required: - key type: object + 
x-kubernetes-map-type: atomic type: array x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.scriptFrom.secretRef @@ -467,6 +468,7 @@ spec: "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-validations: - message: forbidden to update spec.scriptSpec.script.selector rule: self == oldSelf @@ -616,7 +618,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object required: - componentName @@ -919,8 +922,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object services: description: services records the last services of the component. 
diff --git a/deploy/helm/crds/apps.kubeblocks.io_servicedescriptors.yaml b/deploy/helm/crds/apps.kubeblocks.io_servicedescriptors.yaml index 2f0dcd73083..653b4a30527 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_servicedescriptors.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_servicedescriptors.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: servicedescriptors.apps.kubeblocks.io @@ -97,6 +96,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -114,6 +114,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -138,6 +139,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -157,6 +159,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object username: @@ -196,6 +199,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -213,6 +217,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -237,6 +242,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the 
pod's namespace properties: @@ -256,6 +262,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object type: object @@ -294,6 +301,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -311,6 +319,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, @@ -335,6 +344,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -353,6 +363,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object port: @@ -390,6 +401,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -407,6 +419,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, @@ -431,6 +444,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -449,6 +463,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object serviceKind: diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml index f3261eca785..660c40806d3 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml +++ 
b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuppolicies.dataprotection.kubeblocks.io @@ -217,6 +216,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -413,6 +413,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -628,6 +629,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml index 8d61dbb411b..00fcc2ab7a4 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backuprepos.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuprepos.dataprotection.kubeblocks.io @@ -76,6 +75,7 @@ spec: name must be unique. type: string type: object + x-kubernetes-map-type: atomic pvReclaimPolicy: description: The reclaim policy for the PV created by this backup repo. @@ -190,6 +190,7 @@ spec: name must be unique. 
type: string type: object + x-kubernetes-map-type: atomic generatedStorageClassName: description: generatedStorageClassName indicates the generated storage class name. diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml index 3bce53724b5..27fa8f29e72 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backups.dataprotection.kubeblocks.io diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml index 2bec3e71768..5244e2b2203 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: backuptools.dataprotection.kubeblocks.io @@ -94,6 +93,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -111,6 +111,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -135,6 +136,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: 
Selects a key of a secret in the pod's namespace properties: @@ -153,6 +155,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -180,6 +183,7 @@ spec: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -195,6 +199,7 @@ spec: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array x-kubernetes-preserve-unknown-fields: true @@ -299,7 +304,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object x-kubernetes-preserve-unknown-fields: true diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml index 917f6a6390f..12cdf01405b 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: restorejobs.dataprotection.kubeblocks.io @@ -105,6 +104,7 @@ spec: "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true secret: description: secret is used to connect to the target database @@ -301,6 +301,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -332,6 +333,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -403,6 +405,7 @@ spec: keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta @@ -433,6 +436,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -486,6 +490,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 @@ -529,6 +534,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -554,7 +560,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -665,6 +671,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if @@ -783,8 +790,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -838,6 +845,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. More info: @@ -928,6 +936,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -1105,6 +1114,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than @@ -1275,6 +1285,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -1304,6 +1315,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be @@ -1352,6 +1364,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -1417,6 +1430,7 @@ spec: Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project @@ -1533,6 +1547,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -1572,6 +1587,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -1687,6 +1703,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within diff --git a/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml b/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml index 6c45a4f8f22..80f4c9f946b 100644 --- a/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml +++ b/deploy/helm/crds/extensions.kubeblocks.io_addons.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: addons.extensions.kubeblocks.io diff --git a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml index e53ece23fab..5c2b75b65b3 100644 --- a/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml +++ b/deploy/helm/crds/storage.kubeblocks.io_storageproviders.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: storageproviders.storage.kubeblocks.io diff --git a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml index 94fbb5bca94..6e84668baf6 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks name: replicatedstatemachines.workloads.kubeblocks.io @@ -287,11 +286,10 @@ spec: supports specifying the loadBalancerIP when a 
load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future - API version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -310,12 +308,21 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. 
@@ -630,6 +637,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -647,6 +655,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -671,6 +680,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -690,6 +700,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object username: @@ -728,6 +739,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, @@ -745,6 +757,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, @@ -769,6 +782,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace properties: @@ -788,6 +802,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object type: object required: @@ -1072,6 +1087,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic service: description: service defines the behavior of a service spec. provides read-write service https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -1293,11 +1309,10 @@ spec: supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future API - version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -1316,12 +1331,21 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names (as + per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + * 'kubernetes.io/ws' - WebSocket over cleartext as + described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. 
@@ -1738,6 +1762,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -1844,10 +1869,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -1932,6 +1959,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -1993,6 +2021,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2102,6 +2131,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -2159,6 +2189,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. @@ -2268,6 +2299,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -2329,6 +2361,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2438,6 +2471,7 @@ spec: only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -2495,6 +2529,7 @@ spec: only "value". 
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. @@ -2611,6 +2646,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -2630,6 +2666,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -2656,6 +2693,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -2678,6 +2716,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2710,6 +2749,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -2728,6 +2768,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -2790,7 +2831,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -2894,7 +2938,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -2979,8 +3026,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3014,7 +3060,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3191,8 +3240,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3226,7 +3274,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -3318,6 +3369,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -3367,9 +3440,32 @@ spec: of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" for the + init container will have the following effect: this + init container will be continually restarted on exit + until all regular containers have terminated. Once + all regular containers have completed, all init containers + with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although + this init container still starts in the init container + sequence, it does not wait for the container to complete + before proceeding to the next init container. Instead, + the next init container starts immediately after this + init container is started, or after any startupProbe + has successfully completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, the fields @@ -3499,7 +3595,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. 
type: string type: description: "type indicates which kind of seccomp @@ -3535,14 +3632,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -3593,8 +3686,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -3628,7 +3720,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. 
type: string value: description: The header field value @@ -3990,6 +4085,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -4009,6 +4105,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -4035,6 +4132,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -4057,6 +4155,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4089,6 +4188,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -4107,6 +4207,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4165,7 +4266,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -4269,7 +4373,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -4352,8 +4459,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. 
@@ -4387,7 +4493,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4555,8 +4664,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4590,7 +4698,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -4682,6 +4793,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -4732,9 +4865,16 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: Restart policy for the container to manage + the restart behavior of each container within a pod. + This may only be set for init containers. You cannot + set this field on ephemeral containers. + type: string securityContext: description: 'Optional: SecurityContext defines the security options the ephemeral container should be @@ -4864,7 +5004,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -4900,14 +5041,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -4950,8 +5087,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. 
This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -4985,7 +5121,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5276,6 +5415,7 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: description: 'List of initialization containers belonging @@ -5373,6 +5513,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, @@ -5392,6 +5533,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, @@ -5418,6 +5560,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -5440,6 +5583,7 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -5472,6 +5616,7 @@ spec: must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -5490,6 +5635,7 @@ spec: be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -5552,7 +5698,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. 
type: string value: description: The header field value @@ -5656,7 +5805,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. + This will be canonicalized upon + output, so case-variant names will + be understood as the same header. type: string value: description: The header field value @@ -5741,8 +5893,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5776,7 +5927,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -5953,8 +6107,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -5988,7 +6141,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -6080,6 +6236,28 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' 
+ type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -6129,9 +6307,32 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" for the + init container will have the following effect: this + init container will be continually restarted on exit + until all regular containers have terminated. Once + all regular containers have completed, all init containers + with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although + this init container still starts in the init container + sequence, it does not wait for the container to complete + before proceeding to the next init container. 
Instead, + the next init container starts immediately after this + init container is started, or after any startupProbe + has successfully completed.' + type: string securityContext: description: 'SecurityContext defines the security options the container should be run with. If set, the fields @@ -6261,7 +6462,8 @@ spec: The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + location. Must be set if type is "Localhost". + Must NOT be set for any other type. type: string type: description: "type indicates which kind of seccomp @@ -6297,14 +6499,10 @@ spec: hostProcess: description: HostProcess determines if a container should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean @@ -6355,8 +6553,7 @@ spec: type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -6390,7 +6587,10 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: The header field name. 
This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. type: string value: description: The header field value @@ -6738,19 +6938,14 @@ spec: namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, - the ResourceClaim will also be deleted. The name - of the ResourceClaim will be -, where is the PodResourceClaim.Name. - Pod validation will reject the pod if the concatenated - name is not valid for a ResourceClaim (e.g. too - long). \n An existing ResourceClaim with that - name that is not owned by the pod will not be - used for the pod to avoid using an unrelated resource - by mistake. Scheduling and pod startup are then - blocked until the unrelated ResourceClaim is removed. - \n This field is immutable and no changes will - be made to the corresponding ResourceClaim by - the control plane after creating the ResourceClaim." + the ResourceClaim will also be deleted. The pod + name and resource name, along with a generated + component, will be used to form a unique name + for the ResourceClaim, which will be recorded + in pod.status.resourceClaimStatuses. \n This field + is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane + after creating the ResourceClaim." type: string type: object required: @@ -6762,8 +6957,9 @@ spec: x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the - pod. One of Always, OnFailure, Never. Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + pod. One of Always, OnFailure, Never. In some contexts, + only a subset of those values may be permitted. Default + to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string runtimeClassName: description: 'RuntimeClassName refers to a RuntimeClass object @@ -6781,10 +6977,12 @@ spec: type: string schedulingGates: description: "SchedulingGates is an opaque list of values - that if specified will block scheduling the pod. More info: - \ https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. - \n This is an alpha-level feature enabled by PodSchedulingReadiness - feature gate." + that if specified will block scheduling the pod. If schedulingGates + is not empty, the pod will stay in the SchedulingGated state + and the scheduler will not attempt to schedule the pod. + \n SchedulingGates can only be set at pod creation time, + and be removed only afterwards. \n This is a beta feature + enabled by the PodSchedulingReadiness feature gate." items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -6894,8 +7092,9 @@ spec: defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's - configured seccomp profile location. Must only be - set if type is "Localhost". + configured seccomp profile location. Must be set + if type is "Localhost". Must NOT be set for any + other type. type: string type: description: "type indicates which kind of seccomp @@ -6963,15 +7162,11 @@ spec: type: string hostProcess: description: HostProcess determines if a container - should be run as a 'Host Process' container. This - field is alpha-level and will only be honored by - components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the feature - flag will result in errors when validating the Pod. - All of a Pod's containers must have the same effective + should be run as a 'Host Process' container. 
All + of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix - of HostProcess containers and non-HostProcess containers). In - addition, if HostProcess is true then HostNetwork + of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: @@ -7131,16 +7326,21 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: - description: MatchLabelKeys is a set of pod label keys + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming - pod. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. + pod. The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be set when + LabelSelector isn't set. Keys that don't exist in + the incoming pod labels will be ignored. A null or + empty list means only match against labelSelector. + \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." items: type: string type: array @@ -7400,6 +7600,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' @@ -7435,6 +7636,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeID: description: 'volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' @@ -7514,6 +7716,7 @@ spec: or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external @@ -7547,6 +7750,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic readOnly: description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). @@ -7605,6 +7809,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this file, must be an @@ -7650,6 +7855,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -7677,7 +7883,7 @@ spec: be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that - the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -7801,6 +8007,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume @@ -7932,7 +8139,8 @@ spec: a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + Requests cannot exceed Limits. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -7990,6 +8198,7 @@ spec: ANDed. 
type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. @@ -8088,6 +8297,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -8278,6 +8488,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic targetPortal: description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if @@ -8460,6 +8671,7 @@ spec: the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -8491,6 +8703,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: description: 'Optional: mode bits used to set permissions on this @@ -8546,6 +8759,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object @@ -8617,6 +8831,7 @@ spec: the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project @@ -8743,6 +8958,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic user: description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' @@ -8787,6 +9003,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic sslEnabled: description: sslEnabled Flag enable/disable SSL communication with Gateway, default false @@ -8912,6 +9129,7 @@ spec: kind, uid?' type: string type: object + x-kubernetes-map-type: atomic volumeName: description: volumeName is the human-readable name of the StorageOS volume. 
Volume names are only @@ -9085,6 +9303,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume @@ -9192,7 +9411,7 @@ spec: of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -9241,6 +9460,7 @@ spec: contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' @@ -9265,6 +9485,50 @@ spec: items: type: string type: array + allocatedResourceStatuses: + additionalProperties: + description: When a controller receives persistentvolume + claim update with ClaimResourceStatus for a resource + that it does not recognizes, then it should ignore that + update and let other controllers handle it. + type: string + description: "allocatedResourceStatuses stores status of + resource being resized for the given PVC. Key names follow + standard Kubernetes label syntax. Valid values are either: + * Un-prefixed keys: - storage - the capacity of the volume. + * Custom resources must use implementation-defined prefixed + names such as \"example.com/my-custom-resource\" Apart + from above values - keys that are unprefixed or have kubernetes.io + prefix are considered reserved and hence may not be used. 
+ \n ClaimResourceStatus can be in any of following states: + - ControllerResizeInProgress: State set when resize controller + starts resizing the volume in control-plane. - ControllerResizeFailed: + State set when resize has failed in resize controller + with a terminal error. - NodeResizePending: State set + when resize controller has finished resizing the volume + but further resizing of volume is needed on the node. + - NodeResizeInProgress: State set when kubelet starts + resizing the volume. - NodeResizeFailed: State set when + resizing has failed in kubelet with a terminal error. + Transient errors don't set NodeResizeFailed. For example: + if expanding a PVC for more capacity - this field can + be one of the following states: - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeFailed\" When this field is not set, it + means that no resize operation is in progress for the + given PVC. \n A controller that receives PVC update with + previously unknown resourceName or ClaimResourceStatus + should ignore the update for the purpose it was designed. + For example - a controller that only is responsible for + resizing capacity of the volume, should ignore PVC updates + that change other valid resources associated with PVC. + \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure + feature." 
+ type: object + x-kubernetes-map-type: granular allocatedResources: additionalProperties: anyOf: @@ -9272,18 +9536,30 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: allocatedResources is the storage resource - within AllocatedResources tracks the capacity allocated - to a PVC. It may be larger than the actual capacity when - a volume expansion operation is requested. For storage - quota, the larger value from allocatedResources and PVC.spec.resources - is used. If allocatedResources is not set, PVC.spec.resources - alone is used for quota calculation. If a volume expansion - capacity request is lowered, allocatedResources is only - lowered if there are no expansion operations in progress - and if the actual volume capacity is equal or lower than - the requested capacity. This is an alpha field and requires - enabling RecoverVolumeExpansionFailure feature. + description: "allocatedResources tracks the resources allocated + to a PVC including its capacity. Key names follow standard + Kubernetes label syntax. Valid values are either: * Un-prefixed + keys: - storage - the capacity of the volume. * Custom + resources must use implementation-defined prefixed names + such as \"example.com/my-custom-resource\" Apart from + above values - keys that are unprefixed or have kubernetes.io + prefix are considered reserved and hence may not be used. + \n Capacity reported here may be larger than the actual + capacity when a volume expansion operation is requested. + For storage quota, the larger value from allocatedResources + and PVC.spec.resources is used. If allocatedResources + is not set, PVC.spec.resources alone is used for quota + calculation. 
If a volume expansion capacity request is + lowered, allocatedResources is only lowered if there are + no expansion operations in progress and if the actual + volume capacity is equal or lower than the requested capacity. + \n A controller that receives PVC update with previously + unknown resourceName should ignore the update for the + purpose it was designed. For example - a controller that + only is responsible for resizing capacity of the volume, + should ignore PVC updates that change other valid resources + associated with PVC. \n This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature." type: object capacity: additionalProperties: @@ -9300,7 +9576,7 @@ spec: volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. items: - description: PersistentVolumeClaimCondition contails details + description: PersistentVolumeClaimCondition contains details about state of pvc properties: lastProbeTime: @@ -9338,13 +9614,6 @@ spec: phase: description: phase represents the current phase of PersistentVolumeClaim. type: string - resizeStatus: - description: resizeStatus stores status of resize operation. - ResizeStatus is not set by default but when expansion - is complete resizeStatus is set to empty string by resize - controller or kubelet. This is an alpha field and requires - enabling RecoverVolumeExpansionFailure feature. 
- type: string type: object type: object type: array diff --git a/go.mod b/go.mod index b1d1d74378c..c48b0cc0ded 100644 --- a/go.mod +++ b/go.mod @@ -18,21 +18,21 @@ require ( github.com/bhmj/jsonslice v1.1.2 github.com/briandowns/spinner v1.23.0 github.com/cenkalti/backoff/v4 v4.2.1 - github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230423031423-0b31a519b502 + github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad github.com/clbanning/mxj/v2 v2.5.7 github.com/containerd/stargz-snapshotter/estargz v0.14.3 - github.com/containers/common v0.49.1 + github.com/containers/common v0.55.4 github.com/dapr/kit v0.11.3 github.com/deckarep/golang-set/v2 v2.3.1 github.com/dlclark/regexp2 v1.10.0 - github.com/docker/docker v24.0.2+incompatible + github.com/docker/docker v24.0.6+incompatible github.com/docker/go-connections v0.4.1-0.20190612165340-fd1b1942c4d5 github.com/dustin/go-humanize v1.0.1 github.com/evanphx/json-patch v5.6.0+incompatible github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 github.com/ghodss/yaml v1.0.0 - github.com/go-errors/errors v1.4.0 + github.com/go-errors/errors v1.4.2 github.com/go-git/go-git/v5 v5.6.1 github.com/go-logr/logr v1.2.4 github.com/go-logr/zapr v1.2.4 @@ -49,7 +49,7 @@ require ( github.com/jackc/pgx/v5 v5.4.3 github.com/jedib0t/go-pretty/v6 v6.4.6 github.com/json-iterator/go v1.1.12 - github.com/k3d-io/k3d/v5 v5.4.4 + github.com/k3d-io/k3d/v5 v5.5.2 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 github.com/kubesphere/kubekey/v3 v3.0.7 @@ -60,7 +60,7 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.8 - github.com/opencontainers/image-spec v1.1.0-rc2 + github.com/opencontainers/image-spec v1.1.0-rc5 github.com/pashagolub/pgxmock/v2 v2.11.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 @@ -86,59 +86,59 @@ require ( 
github.com/xdg-go/scram v1.1.2 github.com/xeipuuv/gojsonschema v1.2.0 go.etcd.io/etcd/client/v3 v3.5.9 - go.etcd.io/etcd/server/v3 v3.5.6 + go.etcd.io/etcd/server/v3 v3.5.9 go.mongodb.org/mongo-driver v1.11.6 go.uber.org/automaxprocs v1.5.2 go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.12.0 + golang.org/x/crypto v0.13.0 golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb golang.org/x/net v0.14.0 golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.3.0 - golang.org/x/text v0.12.0 + golang.org/x/text v0.13.0 google.golang.org/grpc v1.56.2 google.golang.org/protobuf v1.31.0 gopkg.in/inf.v0 v0.9.1 gopkg.in/yaml.v2 v2.4.0 - helm.sh/helm/v3 v3.11.1 - k8s.io/api v0.26.3 - k8s.io/apiextensions-apiserver v0.26.3 - k8s.io/apimachinery v0.26.3 - k8s.io/cli-runtime v0.26.3 - k8s.io/client-go v0.26.3 - k8s.io/code-generator v0.26.3 - k8s.io/component-base v0.26.3 + helm.sh/helm/v3 v3.12.3 + k8s.io/api v0.28.2 + k8s.io/apiextensions-apiserver v0.28.1 + k8s.io/apimachinery v0.28.2 + k8s.io/cli-runtime v0.28.2 + k8s.io/client-go v0.28.2 + k8s.io/code-generator v0.28.2 + k8s.io/component-base v0.28.2 k8s.io/cri-api v0.27.1 - k8s.io/gengo v0.0.0-20220913193501-391367153a38 + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a - k8s.io/kubectl v0.26.0 + k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d + k8s.io/kubectl v0.28.2 k8s.io/kubelet v0.26.1 - k8s.io/metrics v0.26.3 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 - sigs.k8s.io/controller-runtime v0.14.6 - sigs.k8s.io/kustomize/kyaml v0.13.9 + k8s.io/metrics v0.28.2 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.15.2 + sigs.k8s.io/kustomize/kyaml v0.14.3 sigs.k8s.io/yaml v1.3.0 ) require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute v1.19.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect 
cloud.google.com/go/iam v0.13.0 // indirect cloud.google.com/go/storage v1.29.0 // indirect + dario.cat/mergo v1.0.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/squirrel v1.5.3 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/Microsoft/hcsshim v0.9.6 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.0 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/acomagu/bufpipe v1.0.4 // indirect @@ -154,18 +154,18 @@ require ( github.com/chzyer/readline v1.5.1 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/apd/v3 v3.2.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect - github.com/containerd/containerd v1.6.18 // indirect - github.com/containers/image/v5 v5.24.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/containerd/containerd v1.7.6 // indirect + github.com/containers/image/v5 v5.26.2 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect 
github.com/containers/ocicrypt v1.1.7 // indirect - github.com/containers/storage v1.45.3 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/containers/storage v1.48.1 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect - github.com/danieljoos/wincred v1.1.2 // indirect + github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect @@ -173,9 +173,9 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 // indirect - github.com/docker/cli v23.0.1+incompatible // indirect + github.com/docker/cli v24.0.6+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -191,22 +191,21 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fullstorydev/grpcurl v1.8.7 // indirect - github.com/fvbommel/sortorder v1.0.2 // indirect + github.com/fvbommel/sortorder v1.1.0 // indirect 
github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.4.1 // indirect - github.com/go-gorp/gorp/v3 v3.0.2 // indirect + github.com/go-gorp/gorp/v3 v3.0.5 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/loads v0.21.2 // indirect github.com/go-openapi/runtime v0.26.0 // indirect github.com/go-openapi/spec v0.20.9 // indirect github.com/go-openapi/strfmt v0.21.7 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-redis/redis/v7 v7.4.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect @@ -221,24 +220,24 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/goodhosts/hostsfile v0.1.1 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-containerregistry v0.14.0 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-containerregistry v0.16.1 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c // indirect - github.com/google/s2a-go v0.1.3 // indirect + github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.8.0 // indirect + github.com/googleapis/gax-go/v2 v2.9.1 // indirect 
github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-getter v1.7.0 // indirect @@ -247,8 +246,8 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/terraform-json v0.15.0 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.14 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect @@ -259,14 +258,13 @@ require ( github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/jhump/protoreflect v1.14.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmoiron/sqlx v1.3.5 // indirect github.com/jonboulle/clockwork v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.16.3 // indirect - github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // 
indirect + github.com/klauspost/compress v1.16.6 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect github.com/kopia/kopia v0.10.7 // indirect github.com/kr/fs v0.1.0 // indirect github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 // indirect @@ -274,7 +272,7 @@ require ( github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible // indirect github.com/lestrrat-go/strftime v1.0.5 // indirect - github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect + github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lithammer/dedent v1.1.0 // indirect @@ -287,17 +285,18 @@ require ( github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect + github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/sys/mount v0.3.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modood/table v0.0.0-20220527013332-8d47e76dad33 // indirect @@ -311,9 +310,9 @@ 
require ( github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/runc v1.1.5 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect - github.com/opencontainers/selinux v1.10.2 // indirect + github.com/opencontainers/runc v1.1.7 // indirect + github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -322,104 +321,107 @@ require ( github.com/pkg/sftp v1.13.5 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/proglottis/gpgme v0.1.3 // indirect - github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0 // indirect + github.com/rancher/wharfie v0.6.2 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 // indirect - github.com/rivo/uniseg v0.4.3 // indirect - github.com/rubenv/sql-migrate v1.2.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rubenv/sql-migrate v1.3.1 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shopspring/decimal v1.2.0 // indirect - 
github.com/sigstore/fulcio v1.0.0 // indirect - github.com/sigstore/rekor v1.2.0 // indirect - github.com/sigstore/sigstore v1.6.4 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sigstore/fulcio v1.3.1 // indirect + github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect + github.com/sigstore/sigstore v1.7.1 // indirect github.com/skeema/knownhosts v1.1.0 // indirect github.com/smartystreets/assertions v1.0.0 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/subosito/gotenv v1.4.2 // indirect - github.com/sylabs/sif/v2 v2.9.0 // indirect + github.com/sylabs/sif/v2 v2.11.5 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/tchap/go-patricia v2.3.0+incompatible // indirect + github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tj/go-spin v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect - github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect github.com/ulikunitz/xz v0.5.11 // indirect - github.com/vbatts/tar-split v0.11.2 // indirect - github.com/vbauerster/mpb/v7 v7.5.3 // indirect + github.com/vbatts/tar-split v0.11.3 // indirect + github.com/vbauerster/mpb/v8 v8.4.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - github.com/xlab/treeprint v1.1.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.12.1 // indirect go.etcd.io/bbolt v1.3.7 // indirect go.etcd.io/etcd/api/v3 v3.5.9 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/v2 v2.305.8 // indirect - go.etcd.io/etcd/etcdctl/v3 v3.5.6 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.8 // indirect - go.etcd.io/etcd/raft/v3 v3.5.8 // indirect - go.etcd.io/etcd/v3 v3.5.6 // indirect + go.etcd.io/etcd/client/v2 v2.305.9 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.9 // indirect + go.etcd.io/etcd/raft/v3 v3.5.9 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 // indirect - go.opentelemetry.io/otel/sdk v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect + go.opentelemetry.io/otel v1.15.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.0 // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.15.0 // indirect + go.opentelemetry.io/otel/trace v1.15.0 // indirect 
go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20230221090011-e4bae7ad2296 // indirect + go.uber.org/multierr v1.11.0 // indirect + go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/mod v0.11.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/term v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.3 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.122.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect + google.golang.org/api v0.124.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect - k8s.io/apiserver v0.26.3 // indirect - k8s.io/component-helpers v0.26.0 // indirect - oras.land/oras-go v1.2.2 
// indirect + inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect + k8s.io/apiserver v0.28.1 // indirect + k8s.io/component-helpers v0.28.2 // indirect + oras.land/oras-go v1.2.4 // indirect periph.io/x/host/v3 v3.8.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) replace ( - github.com/docker/cli => github.com/docker/cli v20.10.14+incompatible + github.com/docker/cli => github.com/docker/cli v24.0.6+incompatible github.com/docker/distribution => github.com/docker/distribution v2.8.2+incompatible - github.com/docker/docker => github.com/moby/moby v20.10.14+incompatible + github.com/docker/docker => github.com/moby/moby v24.0.6+incompatible github.com/spf13/afero => github.com/spf13/afero v1.2.2 google.golang.org/grpc => google.golang.org/grpc v1.53.0 ) diff --git a/go.sum b/go.sum index 046099b5d2f..17eaec0d452 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/creachadair/shell v0.0.7 h1:Z96pB6DkSb7F3Y3BBnJeOZH2gazyMTWlvecSD4vDqfk= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -119,8 +118,8 @@ cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute 
v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -230,7 +229,6 @@ cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= @@ -396,100 +394,79 @@ cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vf cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cuelang.org/go v0.6.0 h1:dJhgKCog+FEZt7OwAYV1R+o/RZPmE8aqFoptmxSWyr8= cuelang.org/go v0.6.0/go.mod h1:9CxOX8aawrr3BgSdqPj7V0RYoXo7XIb+yDFC6uESrOQ= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3 h1:8LoU8N2lIUzkmstvwXvVfniMZlFbesfT2AmA1aqvRr8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod 
h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 
h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 
h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= +github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= -github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= 
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= 
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= -github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/Microsoft/hcsshim v0.11.0 h1:7EFNIY4igHEXUdj1zXgAyU3fLc7QfOKHbkldRVTBdiM= +github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 h1:JMDGhoQvXNTqH6Y3MC0IUw6tcZvaUdujNqzK2HYWZc8= +github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -503,6 +480,7 @@ github.com/StudioSol/set v1.0.0 h1:G27J71la+Da08WidabBkoRrvPLTa4cdCn0RjvyJ5WKQ= 
github.com/StudioSol/set v1.0.0/go.mod h1:hIUNZPo6rEGF43RlPXHq7Fjmf+HkVJBqAjtK7Z9LoIU= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= @@ -510,13 +488,9 @@ github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 h1:vOVO0ypMfTt6tZacyI0kp+iCZb1XSNiYDqnzBWYgfe4= github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412/go.mod h1:AI9hp1tkp10pAlK5TCwL+7yWbRgtDm9jhToq6qij2xs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/anmitsu/go-shlex 
v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -528,9 +502,9 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= @@ -549,7 +523,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy 
v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bhmj/jsonslice v1.1.2 h1:Lzen2S9iG3HsESpiIAnTM7Obs1QiTz83ZXa5YrpTTWI= github.com/bhmj/jsonslice v1.1.2/go.mod h1:O3ZoA0zdEefdbk1dkU5aWPOA36zQhhS/HV6RQFLTlnU= @@ -557,11 +530,8 @@ github.com/bhmj/xpression v0.9.1 h1:N7bX/nWx9oFi/zsiMTx2ehoRApTDAWdQadq/5o2wMGk= github.com/bhmj/xpression v0.9.1/go.mod h1:j9oYmEXJjeL9mrgW1+ZDBKJXnbupsCPGhlO9J5YhS1Q= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -570,37 +540,29 @@ github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yq github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser 
v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v2.1.2+incompatible h1:E7dor84qzwUO8KdCM68CZwq9QOSR7HXlLx3Wj5vui2s= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3pU= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f h1:tRk+aBit+q3oqnj/1mF5HHhP2yxJM2lSa0afOJxQ3nE= github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi 
v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230423031423-0b31a519b502 h1:dlu7F5rX2PA4laECDbFXwtDKktUK31lcC09wU70L3QY= -github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230423031423-0b31a519b502/go.mod h1:5qllHIhMkPEWjIimDum42JtMj0P1Tn9x91XUceuPNjY= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad h1:DVxCvjXlmkm4idu4bAbI9P+D99BsVHTKOKbzRYTlFwU= +github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad/go.mod h1:Yi/tSmvDrnFgyZN4bsXm3gfXrp3zo1uytHmnPEYfquM= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chmduquesne/rollinghash v4.0.0+incompatible h1:hnREQO+DXjqIw3rUTzWN7/+Dpw+N5Um8zpKV0JOEgbo= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -613,11 +575,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf 
v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/clbanning/mxj/v2 v2.5.7 h1:7q5lvUpaPF/WOkqgIDiwjBJaznaLCCBd78pi8ZyAnE0= github.com/clbanning/mxj/v2 v2.5.7/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= @@ -626,161 +583,68 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b 
h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v3 v3.2.0 h1:79kHCn4tO0VGu3W0WujYrMjBDk8a2H4KEUYcXf7whcg= github.com/cockroachdb/apd/v3 v3.2.0/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod 
h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd 
v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns= -github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= +github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= +github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= 
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt 
v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod 
h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/common v0.49.1 h1:6y4/s2WwYxrv+Cox7fotOo316wuZI+iKKPUQweCYv50= -github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg= -github.com/containers/image/v5 v5.24.0 h1:2Pu8ztTntqNxteVN15bORCQnM8rfnbYuyKwUiiKUBuc= -github.com/containers/image/v5 v5.24.0/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc= +github.com/containers/common v0.55.4 h1:7IxB/G5qtDU+rp1YiVWkDpd+ZC4ZlCQ7k2jZJYkB/R8= +github.com/containers/common v0.55.4/go.mod h1:5mVCpfMBWyO+zaD7Fw+DBHFa42YFKROwle1qpEKcX3U= +github.com/containers/image/v5 v5.26.2 h1:JX44e1qkdHiL60eooZZcIgKMBp/Ue2AIdKeBG71CYEg= +github.com/containers/image/v5 v5.26.2/go.mod h1:CTS9VmzgKk8HTSPfPDkthBvRh/2GnFL8K8WBedn3L7I= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust 
v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U= github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw= -github.com/containers/storage v1.45.3 h1:GbtTvTtp3GW2/tcFg5VhgHXcYMwVn2KfZKiHjf9FAOM= -github.com/containers/storage v1.45.3/go.mod h1:OdRUYHrq1HP6iAo79VxqtYuJzC5j4eA2I60jKOoCT7g= +github.com/containers/storage v1.48.1 h1:mMdr6whnMu8jJ1dO+tKaeSNbu6XJYSufWQF20uLr9Og= +github.com/containers/storage v1.48.1/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 
h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM= +github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= -github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= +github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapr/kit v0.11.3 h1:u1X92tE8xsrwXIej7nkcI5Z1t1CFznPwlL18tizNEw4= github.com/dapr/kit v0.11.3/go.mod h1:hQA6xOhcLAiccXTj7e3/bzpHwvAJCSCp70p2xg3jB40= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -806,21 +670,19 @@ github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aB github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod 
h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v20.10.14+incompatible h1:dSBKJOVesDgHo7rbxlYjYsXe7gPzrTT+/cKQgpDAazg= -github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.1-0.20190612165340-fd1b1942c4d5 h1:2o8D0hdBky229bNnc7a8bAZkeVMpH4qsp2Rmt4g/+Zk= github.com/docker/go-connections v0.4.1-0.20190612165340-fd1b1942c4d5/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod 
h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= @@ -831,7 +693,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= @@ -846,7 +707,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= @@ -856,17 +716,13 @@ github.com/emicklei/proto v1.10.0/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/estesp/manifest-tool/v2 v2.0.3 h1:F9HMOqcXvtW+8drQB+BjNRU/+bLXOwCfj3mbjqQC2Ns= github.com/estesp/manifest-tool/v2 v2.0.3/go.mod h1:Suh+tbKQvKHcs4Vltzy8gwZk1y9eSRI635gT4gFw5Ss= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= 
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -880,34 +736,31 @@ github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGF github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/fullstorydev/grpcurl v1.8.7 h1:xJWosq3BQovQ4QrdPO72OrPiWuGgEsxY8ldYsJbPrqI= -github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E= -github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= -github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-errors/errors v1.4.0 h1:2OA7MFw38+e9na72T1xgkomPb6GzZzzxvJ5U630FoRM= -github.com/go-errors/errors v1.4.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= @@ -920,10 +773,9 @@ 
github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= +github.com/go-gorp/gorp/v3 v3.0.5 h1:PUjzYdYu3HBOh8LE+UUmRG2P0IRDak9XMeGNvaeq4Ow= +github.com/go-gorp/gorp/v3 v3.0.5/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -959,8 +811,8 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= 
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= @@ -982,8 +834,9 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-quicktest/qt v1.100.0 h1:I7iSLgIwNp0E0UnSvKJzs7ig0jg/Iq83zsZjtQNW7jY= @@ -992,8 +845,6 @@ github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf github.com/go-redis/redismock/v9 v9.0.3 h1:mtHQi2l51lCmXIbTRTqb1EiHYe9tL5Yk5oorlSJJqR0= github.com/go-redis/redismock/v9 v9.0.3/go.mod h1:F6tJRfnU8R/NZ0E+Gjvoluk14MqMC5ueSZX6vVQypc0= github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= 
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -1034,39 +885,29 @@ github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXs github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1116,13 +957,12 @@ 
github.com/goodhosts/hostsfile v0.1.1 h1:SqRUTFOshOCon0ZSXDrW1bkKZvs4+5pRgYFWySd github.com/goodhosts/hostsfile v0.1.1/go.mod h1:lXcUP8xO4WR5vvuQ3F/N0bMQoclOtYKEEUnyY2jTusY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.3 h1:WEb38wcTe0EuAvg7USzgklnOjjnlMaahYO3faaqnCn8= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/certificate-transparency-go v1.1.5 h1:EVfYyOiMSdwwXd6FJxnh0jYgYj/Dh5n9sXtgIr5+Vj0= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -1139,9 +979,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.5.1/go.mod 
h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= -github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1168,15 +1007,13 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY= -github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE= -github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod 
h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.5.2 h1:roGP6G8aaAch7vP08+oitPkvmZzxjTfIkguozqJ04Ok= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1197,8 +1034,8 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= -github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.9.1 h1:DpTpJqzZ3NvX9zqjhIuI1oVzYZMvboZe+3LoeEIJjHM= +github.com/googleapis/gax-go/v2 v2.9.1/go.mod h1:4FG3gMrVZlyMp5itSYKMU9z/lBE7+SbnUOvzH2HqbEY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -1206,12 +1043,10 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -1221,26 +1056,24 @@ github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus 
v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -1249,14 +1082,13 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= -github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= @@ -1285,29 +1117,27 @@ github.com/hashicorp/terraform-exec v0.18.0 
h1:BJa6/Fhxnb0zvsEGqUrFSybcnhAiBVSUg github.com/hashicorp/terraform-exec v0.18.0/go.mod h1:6PMRgg0Capig5Fn0zW9/+WM3vQsdwotwa8uxDVzLpHE= github.com/hashicorp/terraform-json v0.15.0 h1:/gIyNtR6SFw6h5yzlbDbACyGvIhKtQi8mTsbkNd79lE= github.com/hashicorp/terraform-json v0.15.0/go.mod h1:+L1RNzjDU5leLFZkHTFTbJXaoqUC6TqXlFgDoOXrtvk= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo= +github.com/imdario/mergo v0.3.14/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= @@ -1334,13 +1164,6 @@ github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1R github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protoreflect v1.11.0/go.mod 
h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= -github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc= @@ -1353,16 +1176,13 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1375,9 +1195,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/k3d-io/k3d/v5 v5.4.4 h1:txQNPNhxuSDRU1+dN9wDjliQp9DslK76gVplqWjQDjg= -github.com/k3d-io/k3d/v5 v5.4.4/go.mod h1:+eDAoEC4G3bJ4daG2h68kY+L4mCkV7GU4TTGXhfSk0c= +github.com/k3d-io/k3d/v5 v5.5.2 h1:VEkopEqTUBpGJghjltWqv1jI57MLKFaxWt2yBp2lZmE= +github.com/k3d-io/k3d/v5 v5.5.2/go.mod h1:PA0IkO8CB2OsBpBO3rJwskmA69Ibb9qdFiUGE/8IqUA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1389,15 +1208,13 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= -github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= +github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0= -github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1410,7 +1227,9 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1440,16 +1259,15 @@ github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkL github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible/go.mod h1:ZQnN8lSECaebrkQytbHj4xNgtg8CR7RYXnPok8e0EHA= github.com/lestrrat-go/strftime v1.0.5 h1:A7H3tT8DhTz8u65w+JRpiBxM4dINQhUXAZnhBa2xeOE= github.com/lestrrat-go/strftime v1.0.5/go.mod h1:E1nN3pCbtMSu1yjSVeyuRFVm/U0xoR76fd03sz+Qz4g= -github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= -github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= +github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I= +github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0 
h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e h1:hz4quJkaJWDo+xW+G6wTF6d6/95QvJ+o2D0+bB/tJ1U= @@ -1485,12 +1303,19 @@ github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlW github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-oci8 v0.1.1/go.mod 
h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= @@ -1500,34 +1325,28 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod 
h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/minio-go/v7 v7.0.23 h1:NleyGQvAn9VQMU+YHVrgV4CX+EPtxPt/78lHOOTncy4= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14= -github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -1555,21 +1374,18 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mmcloughlin/avo v0.5.0/go.mod 
h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/moby v20.10.14+incompatible h1:J47P0p+O49F3au8QyE34dE/qXz571kcVmsbx8bvEuS0= -github.com/moby/moby v20.10.14+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v24.0.6+incompatible h1:O/XZsZtaOVTYszsJQlr9pN1Zo1aRSH0KCWAIa6Kpm3s= +github.com/moby/moby v24.0.6+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0= -github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= 
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1588,81 +1404,56 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto= github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= -github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
+github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= -github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= +github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= +github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/ostreedev/ostree-go 
v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= @@ -1672,7 +1463,6 @@ github.com/pashagolub/pgxmock/v2 v2.11.0 h1:ZUKqZy5Zf/5WJjAXHErjHngJBW5/3fEujGD+ github.com/pashagolub/pgxmock/v2 v2.11.0/go.mod h1:D3YslkN/nJ4+umVqWmbwfSXugJIjPMChkGBG47OJpNw= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= @@ -1687,7 +1477,6 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1697,11 +1486,12 @@ github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfx github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod 
h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= @@ -1709,13 +1499,12 @@ github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEt github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0= github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= 
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1725,29 +1514,26 @@ github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUo github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0 h1:sadMIsgmHpEOGbUs6VtHBXRR1OHevnj7hLx9ZcdNGW4= github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/rancher/dynamiclistener v0.3.5 h1:5TaIHvkDGmZKvc96Huur16zfTKOiLhDtK4S+WV0JA6A= +github.com/rancher/wharfie v0.6.2 h1:ZTrZ0suU0abWwLLf2zaqjhwpxK8+BkbnMocnU2u1bSQ= +github.com/rancher/wharfie v0.6.2/go.mod h1:7ii0+eehBwUEFaJMiRHWCbvN11bsfVHT1oc+P/6IBSg= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o= @@ -1759,8 +1545,8 @@ github.com/replicatedhq/troubleshoot v0.57.0/go.mod h1:R5VdixzaBXfWLbP9mcLuZKs/b github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1768,26 +1554,23 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= -github.com/rubenv/sql-migrate v1.2.0 h1:fOXMPLMd41sK7Tg75SXDec15k3zg5WNV6SjuDRiNfcU= -github.com/rubenv/sql-migrate v1.2.0/go.mod h1:Z5uVnq7vrIrPmHbVFfR4YLHRZquxeHpckCnRq0P/K9Y= +github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA= +github.com/rubenv/sql-migrate v1.3.1/go.mod h1:YzG/Vh82CwyhTFXy+Mf5ahAiiEOpAlHurg+23VEzcsk= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI= github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= 
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie v1.0.0 h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1801,16 +1584,16 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg= -github.com/sigstore/fulcio v1.0.0/go.mod h1:j4MzLxX/Be0rHYh3JF2dgMorkWGzEMHBqIHwFU8I/Rw= -github.com/sigstore/rekor v1.2.0 h1:ahlnoEY3zo8Vc+eZLPobamw6YfBTAbI0lthzUQd6qe4= -github.com/sigstore/rekor v1.2.0/go.mod 
h1:zcFO54qIg2G1/i0sE/nvmELUOng/n0MPjTszRYByVPo= -github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE= -github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y= +github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU= +github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI= +github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ= +github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks= +github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1820,6 +1603,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= @@ -1838,14 +1622,14 @@ github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= @@ -1856,7 +1640,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -1870,14 +1653,12 @@ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOH github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1896,15 +1677,12 @@ github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/sykesm/zap-logfmt v0.0.4 h1:U2WzRvmIWG1wDLCFY3sz8UeEmsdHQjHFNlIdmroVFaI= github.com/sykesm/zap-logfmt v0.0.4/go.mod 
h1:AuBd9xQjAe3URrWT1BBDk2v2onAZHkZkWRMiYZXiZWA= -github.com/sylabs/sif/v2 v2.9.0 h1:q9K92j1QW4/QLOtKh9YZpJHrXav6x15AVhQGPVLcg+4= -github.com/sylabs/sif/v2 v2.9.0/go.mod h1:bRdFzcqif0eDjwx0isG4cgTFoKTQn/vfBXVSoP2rB2Y= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= +github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= -github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA= github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c= @@ -1919,31 +1697,19 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod 
h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.7 h1:aXiFAgRugfJ27UFDsGJ9DB2FvTC73hlVXFSqq5bo9eU= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= -github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/vbauerster/mpb/v7 v7.5.3 
h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w= -github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= +github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo= +github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc= github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= @@ -1953,8 +1719,6 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmware-tanzu/velero v1.10.1 h1:6WYOolZIygHb8FOZtpp8vCqCuy5Mk3qBF1S65L5cjuo= github.com/vmware-tanzu/velero v1.10.1/go.mod h1:N0J+j8xGSmanGpy1zCRMH2DMGPpwkUj9EZIUXfOlanY= -github.com/willf/bitset 
v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -1972,13 +1736,12 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMc github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d 
h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -2000,47 +1763,26 @@ github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/pkg/v3 v3.5.9 
h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0= -go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c= -go.etcd.io/etcd/client/v2 v2.305.8/go.mod h1:ZlAsxDK5/10I6xVHhFo9zinCMr/DDLKFetDDXlzKwqE= -go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= +go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= -go.etcd.io/etcd/etcdctl/v3 v3.5.6 h1:HKJrMrJPhol/iM+JpUYLTpq5wIxCy19tGucQTtZ/2KA= -go.etcd.io/etcd/etcdctl/v3 v3.5.6/go.mod h1:w+nFnWbv2wlCwREmcw7y7UxvaP3H10eWSJeAkl6i860= -go.etcd.io/etcd/etcdutl/v3 v3.5.6 h1:Pdl25eFdeDfwutUUs13vXJKTqLbLymUpCCr+TWmCmd8= -go.etcd.io/etcd/etcdutl/v3 v3.5.6/go.mod h1:UnDF5aAyVxEP/VwiW0glKQ79E9/1uG4/NBVN4RLZ8v0= -go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k= -go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU= -go.etcd.io/etcd/pkg/v3 v3.5.8/go.mod h1:C17MJkZHJIyJV+wWWx6Jz6YS6BfdkOnUkSwT9uuEO7s= -go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= -go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4= -go.etcd.io/etcd/raft/v3 v3.5.8/go.mod h1:W6P5WxtOMfYNdLSEJX3vc8Pg6LOt+ewI9UCFKcnIexA= -go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds= -go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo= -go.etcd.io/etcd/tests/v3 v3.5.6 
h1:0akA+RdHfyRI9rRjAzoDo/VAK6Glon8RL/BdqdfHAqE= -go.etcd.io/etcd/tests/v3 v3.5.6/go.mod h1:jnYOS8eXVQnhxDIWX2YxzMG9SDkuDl6UHUm+mAKgxqg= -go.etcd.io/etcd/v3 v3.5.6 h1:Oct/Kxr9yfwZaMqD66euFBhr6KaLQYrzae5X1iQdXSA= -go.etcd.io/etcd/v3 v3.5.6/go.mod h1:xygb0bSpV6OprnKbOYLsctrbw9o4lKsgy2dCMUC4zVo= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= +go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -2058,34 +1800,28 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= 
-go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= -go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw= +go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk= +go.opentelemetry.io/otel v1.15.0/go.mod 
h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.0 h1:ZSdnH1x5Gm/eUFNQquwSt4/LMCOqS6KPlI9qaTKx5Ho= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.0/go.mod h1:uOTV75+LOzV+ODmL8ahRLWkFA3eQcSC2aAsbxIu4duk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.0 h1:rk5I7PaOk5NGQHfHR2Rz6MgdA8AYQSHwsigFsOxEC1c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.0/go.mod h1:pvkFJxNUXyJ5i8u6m8NIcqkoOf/65VM2mSyBbBJfeVQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.0 h1:rHD0vfQbtki6/FnsMzTpAOgdv+Ku+T6R47MZXmgelf8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.0/go.mod h1:RPagkaZrpwD+rSwQjzos6rBLsHOvenOqufCj4/7I46E= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk= +go.opentelemetry.io/otel/sdk v1.15.0/go.mod h1:XDEMrYWzJ4YlC17i6Luih2lwDw2j6G0PkUfr1ZqE+rQ= +go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo= +go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.starlark.net v0.0.0-20201006213952-227f4aabceb5 h1:ApvY/1gw+Yiqb/FKeks3KnVPWpkR3xzij82XPKLjJVw= -go.starlark.net v0.0.0-20201006213952-227f4aabceb5/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= -go.uber.org/atomic v1.3.2/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -2099,8 +1835,8 @@ go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -2108,16 +1844,15 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/intern 
v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= +go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230221090011-e4bae7ad2296 h1:QJ/xcIANMLApehfgPCHnfK1hZiaMmbaTVmPv7DAoTbo= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230221090011-e4bae7ad2296/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2125,31 +1860,30 @@ golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaE golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -2190,12 +1924,12 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0 
h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2212,7 +1946,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2230,13 +1963,11 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -2245,8 +1976,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -2266,6 +1997,7 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2324,6 +2056,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2331,39 +2064,28 @@ golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2372,23 +2094,12 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2397,16 +2108,12 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2414,13 +2121,10 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2441,8 +2145,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2450,18 +2155,20 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2475,15 +2182,13 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 
h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -2509,7 +2214,6 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2534,6 +2238,7 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2546,7 +2251,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
@@ -2562,6 +2266,7 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2573,8 +2278,8 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNq golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2627,8 +2332,8 @@ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91 google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= 
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.122.0 h1:zDobeejm3E7pEG1mNHvdxvjs5XJoCMzyNH+CmwL94Es= -google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0 h1:dP6Ef1VgOGqQ8eiv4GiY8RhmeyqzovcXBYPDUYG8Syo= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2642,7 +2347,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -2651,7 +2355,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2666,14 +2369,12 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2703,7 +2404,6 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= @@ -2754,8 +2454,12 @@ google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZV google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 
h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2789,26 +2493,23 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= 
+gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= +gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -2821,7 +2522,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -2835,11 +2535,10 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I= -helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +helm.sh/helm/v3 v3.12.3 h1:5y1+Sbty12t48T/t/CGNYUIME5BJ0WKfmW/sobYqkFg= +helm.sh/helm/v3 v3.12.3/go.mod h1:KPKQiX9IP5HX7o5YnnhViMnNuKiL/lJBVQ47GHe1R0k= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2847,82 +2546,57 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20220617031823-097006376321 h1:B4dC8ySKTQXasnjDTMsoCMf1sQG4WsMej0WXaHxunmU= -inet.af/netaddr v0.0.0-20220617031823-097006376321/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= +inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= +inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= -k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= -k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= -k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw= +k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= -k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= 
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.26.3 h1:blBpv+yOiozkPH2aqClhJmJY+rp53Tgfac4SKPDJnU4= -k8s.io/apiserver v0.26.3/go.mod h1:CJe/VoQNcXdhm67EvaVjYXxR3QyfwpceKPuPaeLibTA= -k8s.io/cli-runtime v0.26.3 h1:3ULe0oI28xmgeLMVXIstB+ZL5CTGvWSMVMLeHxitIuc= -k8s.io/cli-runtime v0.26.3/go.mod h1:5YEhXLV4kLt/OSy9yQwtSSNZU2Z7aTEYta1A+Jg4VC4= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM= +k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w= +k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= +k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= -k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.26.3 h1:DNYPsWoeFwmg4qFg97Z1cHSSv7KSG10mAEIFoZGTQM8= -k8s.io/code-generator v0.26.3/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI= -k8s.io/component-base v0.20.1/go.mod 
h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= -k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= -k8s.io/component-helpers v0.26.0 h1:KNgwqs3EUdK0HLfW4GhnbD+q/Zl9U021VfIU7qoVYFk= -k8s.io/component-helpers v0.26.0/go.mod h1:jHN01qS/Jdj95WCbTe9S2VZ9yxpxXNY488WjF+yW4fo= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/code-generator v0.28.2 h1:u47guga1rCWLnEnffF09p+cqj8B20oHOLoQ1lb1HGtQ= +k8s.io/code-generator v0.28.2/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A= +k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= +k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q= k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220913193501-391367153a38 h1:yGN2TZt9XIl5wrcYaFtVMqzP2GIzX5gIcOObCZCuDeA= -k8s.io/gengo v0.0.0-20220913193501-391367153a38/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.19.12 h1:OwyNUe/7/gxzEnaLd3sC9Yrpx0fZAERzvFslX5Qq5g8= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/kubectl v0.26.0 h1:xmrzoKR9CyNdzxBmXV7jW9Ln8WMrwRK6hGbbf69o4T0= -k8s.io/kubectl v0.26.0/go.mod h1:eInP0b+U9XUJWSYeU9XZnTA+cVYuWyl3iYPGtru0qhQ= +k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d h1:/CFeJBjBrZvHX09rObS2+2iEEDevMWYc1v3aIYAjIYI= +k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.28.2 h1:fOWOtU6S0smdNjG1PB9WFbqEIMlkzU5ahyHkc7ESHgM= +k8s.io/kubectl v0.28.2/go.mod h1:6EQWTPySF1fn7yKoQZHYf9TPwIl2AygHEcJoxFekr64= k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas= k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8= -k8s.io/kubernetes v1.13.0/go.mod 
h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.26.3 h1:pHI8XtmBbGGdh7bL0s2C3v93fJfxyktHPAFsnRYnDTo= -k8s.io/metrics v0.26.3/go.mod h1:NNnWARAAz+ZJTs75Z66fJTV7jHcVb3GtrlDszSIr3fE= +k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= +k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.2 h1:0E9tOHUfrNH7TCDk5KU0jVBEzCqbfdyuVfGmJ7ZeRPE= -oras.land/oras-go v1.2.2/go.mod h1:Apa81sKoZPpP7CDciE006tSZ0x3Q3+dOoBcMZ/aNxvw= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= +oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= periph.io/x/host/v3 v3.8.0 h1:T5ojZ2wvnZHGPS4h95N2ZpcCyHnsvH3YRZ1UUUiv5CQ= periph.io/x/host/v3 v3.8.0/go.mod h1:rzOLH+2g9bhc6pWZrkCrmytD4igwQ2vxFw6Wn6ZOlLY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -2930,21 +2604,17 @@ rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime v0.15.2 h1:9V7b7SDQSJ08IIsJ6CY1CE85Okhp87dyTMNDG0FS7f4= +sigs.k8s.io/controller-runtime v0.15.2/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 h1:cDW6AVMl6t/SLuQaezMET8hgnadZGIAr8tUrxFVOrpg= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw= +sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= +sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= +sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/hack/client-sdk-gen.sh b/hack/client-sdk-gen.sh index cfbd263275d..471751320fa 100755 --- a/hack/client-sdk-gen.sh +++ b/hack/client-sdk-gen.sh @@ -26,7 +26,10 @@ fi CODE_GENERATOR_PATH=$(go list -f '{{.Dir}}' -m k8s.io/code-generator) -GENERATORS="all" # deepcopy,defaulter,client,lister,informer or all +# HACK: add exec permission to code generator scripts +chmod u+x ${CODE_GENERATOR_PATH}/*.sh + +GENERATORS="client,informer,lister" OUTPUT_PACKAGE="github.com/apecloud/kubeblocks/pkg/client" APIS_PACKAGE="github.com/apecloud/kubeblocks/apis" GROUP_VERSIONS="apps:v1alpha1 dataprotection:v1alpha1 extensions:v1alpha1 workloads:v1alpha1 storage:v1alpha1" diff --git a/internal/cli/cloudprovider/k3d.go b/internal/cli/cloudprovider/k3d.go index ee9d76d00da..29f681db68b 100644 --- a/internal/cli/cloudprovider/k3d.go +++ b/internal/cli/cloudprovider/k3d.go @@ -32,7 +32,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/k3d-io/k3d/v5/pkg/actions" k3dClient "github.com/k3d-io/k3d/v5/pkg/client" - config "github.com/k3d-io/k3d/v5/pkg/config/v1alpha4" + config "github.com/k3d-io/k3d/v5/pkg/config/v1alpha5" l "github.com/k3d-io/k3d/v5/pkg/logger" "github.com/k3d-io/k3d/v5/pkg/runtimes" k3d "github.com/k3d-io/k3d/v5/pkg/types" diff --git a/internal/cli/cmd/backuprepo/create.go b/internal/cli/cmd/backuprepo/create.go index 14d867d93ba..57a0ae3857e 100644 --- a/internal/cli/cmd/backuprepo/create.go +++ b/internal/cli/cmd/backuprepo/create.go @@ -452,7 +452,7 @@ func 
registerFlagCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) { util.CheckErr(cmd.RegisterFlagCompletionFunc( providerFlagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, cmd, util.GVRToString(types.StorageProviderGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, util.GVRToString(types.StorageProviderGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp })) util.CheckErr(cmd.RegisterFlagCompletionFunc( "pv-reclaim-policy", diff --git a/internal/cli/cmd/builder/template/mock_client.go b/internal/cli/cmd/builder/template/mock_client.go index ab596e4599a..470f78168a3 100644 --- a/internal/cli/cmd/builder/template/mock_client.go +++ b/internal/cli/cmd/builder/template/mock_client.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" cfgcore "github.com/apecloud/kubeblocks/internal/configuration/core" @@ -154,3 +155,11 @@ func (m *mockClient) Scheme() *runtime.Scheme { func (m *mockClient) RESTMapper() meta.RESTMapper { panic("implement me") } + +func (m *mockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + panic("implement me") +} + +func (m *mockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + panic("implement me") +} diff --git a/internal/cli/cmd/class/create.go b/internal/cli/cmd/class/create.go index 863d347b6af..20d2ec86701 100644 --- a/internal/cli/cmd/class/create.go +++ b/internal/cli/cmd/class/create.go @@ -269,7 +269,7 @@ func registerFlagCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) { util.CheckErr(cmd.RegisterFlagCompletionFunc( "cluster-definition", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, 
cmd, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp })) util.CheckErr(cmd.RegisterFlagCompletionFunc( "type", diff --git a/internal/cli/cmd/cli.go b/internal/cli/cmd/cli.go index c3985733c57..8f136673817 100644 --- a/internal/cli/cmd/cli.go +++ b/internal/cli/cmd/cli.go @@ -119,7 +119,7 @@ func NewDefaultCliCmd() *cobra.Command { case "help", cobra.ShellCompRequestCmd, cobra.ShellCompNoDescRequestCmd: // Don't search for a plugin default: - if err := kccmd.HandlePluginCommand(pluginHandler, cmdPathPieces); err != nil { + if err := kccmd.HandlePluginCommand(pluginHandler, cmdPathPieces, true); err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) os.Exit(1) } @@ -260,7 +260,7 @@ func registerCompletionFuncForGlobalFlags(cmd *cobra.Command, f cmdutil.Factory) cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( "namespace", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, cmd, "namespace", toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, "namespace", toComplete), cobra.ShellCompDirectiveNoFileComp })) cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( "context", diff --git a/internal/cli/cmd/cluster/create.go b/internal/cli/cmd/cluster/create.go index 44d17d23a1e..acf054aa9b2 100755 --- a/internal/cli/cmd/cluster/create.go +++ b/internal/cli/cmd/cluster/create.go @@ -869,7 +869,7 @@ func registerFlagCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) { util.CheckErr(cmd.RegisterFlagCompletionFunc( "cluster-definition", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, cmd, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, 
util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp })) util.CheckErr(cmd.RegisterFlagCompletionFunc( "cluster-version", @@ -877,7 +877,7 @@ func registerFlagCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) { var clusterVersion []string clusterDefinition, err := cmd.Flags().GetString("cluster-definition") if clusterDefinition == "" || err != nil { - clusterVersion = utilcomp.CompGetResource(f, cmd, util.GVRToString(types.ClusterVersionGVR()), toComplete) + clusterVersion = utilcomp.CompGetResource(f, util.GVRToString(types.ClusterVersionGVR()), toComplete) } else { label := fmt.Sprintf("%s=%s", constant.ClusterDefLabelKey, clusterDefinition) clusterVersion = util.CompGetResourceWithLabels(f, cmd, util.GVRToString(types.ClusterVersionGVR()), []string{label}, toComplete) diff --git a/internal/cli/cmd/cluster/create_util.go b/internal/cli/cmd/cluster/create_util.go index 987b582b6f8..5d4d0049478 100644 --- a/internal/cli/cmd/cluster/create_util.go +++ b/internal/cli/cmd/cluster/create_util.go @@ -115,7 +115,7 @@ func registerFlagCompFunc(cmd *cobra.Command, f cmdutil.Factory, c *cluster.Char label := fmt.Sprintf("%s=%s", constant.ClusterDefLabelKey, c.ClusterDef) versions = util.CompGetResourceWithLabels(f, cmd, util.GVRToString(types.ClusterVersionGVR()), []string{label}, toComplete) } else { - versions = utilcomp.CompGetResource(f, cmd, util.GVRToString(types.ClusterVersionGVR()), toComplete) + versions = utilcomp.CompGetResource(f, util.GVRToString(types.ClusterVersionGVR()), toComplete) } return versions, cobra.ShellCompDirectiveNoFileComp }) diff --git a/internal/cli/cmd/cluster/label.go b/internal/cli/cmd/cluster/label.go index 432536ad556..1945852b2c9 100644 --- a/internal/cli/cmd/cluster/label.go +++ b/internal/cli/cmd/cluster/label.go @@ -79,7 +79,6 @@ type LabelOptions struct { namespace string enforceNamespace bool dryRunStrategy cmdutil.DryRunStrategy - dryRunVerifier *resource.QueryParamVerifier builder 
*resource.Builder unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) @@ -142,11 +141,6 @@ func (o *LabelOptions) complete(cmd *cobra.Command, args []string) error { } o.builder = o.Factory.NewBuilder() o.unstructuredClientForMapping = o.Factory.UnstructuredClientForMapping - dynamicClient, err := o.Factory.DynamicClient() - if err != nil { - return err - } - o.dryRunVerifier = resource.NewQueryParamVerifier(dynamicClient, o.Factory.OpenAPIGetter(), resource.QueryParamDryRun) return nil } diff --git a/internal/cli/edit/custom_edit.go b/internal/cli/edit/custom_edit.go index 7f15a05046c..3c16f064d5a 100644 --- a/internal/cli/edit/custom_edit.go +++ b/internal/cli/edit/custom_edit.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/cmd/util/editor" @@ -88,13 +87,8 @@ func (o *CustomEditOptions) Run(originalObj runtime.Object) error { edited = original } - dynamicClient, err := o.Factory.DynamicClient() - if err != nil { - return fmt.Errorf("failed to get dynamic client: %v", err) - } // apply validation - fieldValidationVerifier := resource.NewQueryParamVerifier(dynamicClient, o.Factory.OpenAPIGetter(), resource.QueryParamFieldValidation) - schemaValidator, err := o.Factory.Validator(metav1.FieldValidationStrict, fieldValidationVerifier) + schemaValidator, err := o.Factory.Validator(metav1.FieldValidationStrict) if err != nil { return fmt.Errorf("failed to get validator: %v", err) } diff --git a/internal/cli/patch/patch.go b/internal/cli/patch/patch.go index eeadffcedf1..ae3a90cd96e 100644 --- a/internal/cli/patch/patch.go +++ b/internal/cli/patch/patch.go @@ -65,7 +65,6 @@ type Options struct { namespace string enforceNamespace bool dryRunStrategy cmdutil.DryRunStrategy - dryRunVerifier *resource.QueryParamVerifier args []string builder 
*resource.Builder unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) @@ -116,11 +115,6 @@ func (o *Options) complete(cmd *cobra.Command) error { o.args = append([]string{util.GVRToString(o.GVR)}, o.Names...) o.builder = o.Factory.NewBuilder() o.unstructuredClientForMapping = o.Factory.UnstructuredClientForMapping - dynamicClient, err := o.Factory.DynamicClient() - if err != nil { - return err - } - o.dryRunVerifier = resource.NewQueryParamVerifier(dynamicClient, o.Factory.OpenAPIGetter(), resource.QueryParamDryRun) return nil } @@ -161,11 +155,6 @@ func (o *Options) Run(cmd *cobra.Command) error { name, namespace := info.Name, info.Namespace if o.dryRunStrategy != cmdutil.DryRunClient { mapping := info.ResourceMapping() - if o.dryRunStrategy == cmdutil.DryRunServer { - if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { - return err - } - } client, err := o.unstructuredClientForMapping(mapping) if err != nil { return err diff --git a/internal/cli/util/completion.go b/internal/cli/util/completion.go index b29f692b916..6e6e61d17eb 100644 --- a/internal/cli/util/completion.go +++ b/internal/cli/util/completion.go @@ -39,7 +39,7 @@ import ( func ResourceNameCompletionFunc(f cmdutil.Factory, gvr schema.GroupVersionResource) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - comps := utilcomp.CompGetResource(f, cmd, GVRToString(gvr), toComplete) + comps := utilcomp.CompGetResource(f, GVRToString(gvr), toComplete) seen := make(map[string]bool) var availableComps []string @@ -103,7 +103,7 @@ func CompGetFromTemplateWithLabels(template *string, f cmdutil.Factory, namespac o.LabelSelector = strings.Join(labels, ",") } - _ = o.Run(f, cmd, args) + _ = o.Run(f, args) var comps []string resources := strings.Split(buf.String(), " ") @@ -123,7 +123,7 @@ func 
RegisterClusterCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) { cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( "cluster", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, cmd, GVRToString(types.ClusterGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, GVRToString(types.ClusterGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp }, )) } diff --git a/internal/cli/util/flags/flags.go b/internal/cli/util/flags/flags.go index 5335877cf07..3d4c9000662 100644 --- a/internal/cli/util/flags/flags.go +++ b/internal/cli/util/flags/flags.go @@ -40,7 +40,7 @@ func AddClusterDefinitionFlag(f cmdutil.Factory, cmd *cobra.Command, p *string) cmd.Flags().StringVar(p, "cluster-definition", *p, "Specify cluster definition, run \"kbcli clusterdefinition list\" to show all available cluster definition") util.CheckErr(cmd.RegisterFlagCompletionFunc("cluster-definition", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return utilcomp.CompGetResource(f, cmd, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp + return utilcomp.CompGetResource(f, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp })) } diff --git a/internal/controller/handler/handler_builder.go b/internal/controller/handler/handler_builder.go index 947b145ef63..5cb345fb3fe 100644 --- a/internal/controller/handler/handler_builder.go +++ b/internal/controller/handler/handler_builder.go @@ -20,6 +20,8 @@ along with this program. If not, see . 
package handler import ( + "context" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -50,7 +52,7 @@ func (builder *realBuilder) AddFinder(finder Finder) Builder { } func (builder *realBuilder) Build() handler.EventHandler { - fn := func(obj client.Object) []reconcile.Request { + fn := func(ctx context.Context, obj client.Object) []reconcile.Request { var key *model.GVKNObjKey for i, finder := range builder.finders { key = finder.Find(builder.ctx, obj) diff --git a/internal/controller/handler/handler_builder_test.go b/internal/controller/handler/handler_builder_test.go index 424f99ad115..03d900d053d 100644 --- a/internal/controller/handler/handler_builder_test.go +++ b/internal/controller/handler/handler_builder_test.go @@ -104,22 +104,22 @@ var _ = Describe("handler builder test.", func() { }{ { name: "Create", - testFunc: func() { handler.Create(createEvent, queue) }, + testFunc: func() { handler.Create(ctx, createEvent, queue) }, getTimes: 1, }, { name: "Update", - testFunc: func() { handler.Update(updateEvent, queue) }, + testFunc: func() { handler.Update(ctx, updateEvent, queue) }, getTimes: 2, }, { name: "Delete", - testFunc: func() { handler.Delete(deleteEvent, queue) }, + testFunc: func() { handler.Delete(ctx, deleteEvent, queue) }, getTimes: 1, }, { name: "Generic", - testFunc: func() { handler.Generic(genericEvent, queue) }, + testFunc: func() { handler.Generic(ctx, genericEvent, queue) }, getTimes: 1, }, } diff --git a/internal/webhook/pod_admission.go b/internal/webhook/pod_admission.go deleted file mode 100644 index 44846baa531..00000000000 --- a/internal/webhook/pod_admission.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free 
Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package webhook - -import ( - "context" - "net/http" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -// PodCreateHandler handles Pod -type PodCreateHandler struct { - Client client.Client - Decoder *admission.Decoder -} - -func init() { - HandlerMap["/mutate-v1-pod"] = &PodCreateHandler{} -} - -var _ admission.Handler = &PodCreateHandler{} - -// Handle handles admission requests. -func (h *PodCreateHandler) Handle(ctx context.Context, req admission.Request) admission.Response { - pod := &corev1.Pod{} - - err := h.Decoder.Decode(req, pod) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - // mutate the fields in pod - - // // when pod.namespace is empty, using req.namespace - // if pod.Namespace == "" { - // pod.Namespace = req.Namespace - // } - - // marshaledPod, err := json.Marshal(pod) - // if err != nil { - // return admission.Errored(http.StatusInternalServerError, err) - // } - // return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) - - return admission.Allowed("") -} - -var _ inject.Client = &PodCreateHandler{} - -// InjectClient injects the client into the PodCreateHandler -func (h *PodCreateHandler) InjectClient(c client.Client) error { - h.Client = c - return nil -} - -var _ admission.DecoderInjector = &PodCreateHandler{} - -// InjectDecoder injects the decoder into the PodCreateHandler -func (h *PodCreateHandler) 
InjectDecoder(d *admission.Decoder) error { - h.Decoder = d - return nil -} diff --git a/internal/webhook/webhook.go b/internal/webhook/webhook.go deleted file mode 100644 index 068d891be5d..00000000000 --- a/internal/webhook/webhook.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package webhook - -import ( - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -var ( - setupLog = ctrl.Log.WithName("webhook-setup") - // HandlerMap contains all admission webhook handlers. 
- HandlerMap = map[string]admission.Handler{} -) - -func SetupWithManager(mgr manager.Manager) error { - server := mgr.GetWebhookServer() - // register admission handlers - for path, handler := range HandlerMap { - server.Register(path, &webhook.Admission{Handler: handler}) - setupLog.Info("Registered webhook handler", "path", path) - } - return nil -} diff --git a/lorry/binding/etcd/etcd_test.go b/lorry/binding/etcd/etcd_test.go index 0ec8e460638..1c6cbf35da0 100644 --- a/lorry/binding/etcd/etcd_test.go +++ b/lorry/binding/etcd/etcd_test.go @@ -112,8 +112,8 @@ func (e *EmbeddedETCD) Start(peerAddress string) error { cfg.Dir = dir lpurl, _ := url.Parse("http://localhost:0") lcurl, _ := url.Parse(peerAddress) - cfg.LPUrls = []url.URL{*lpurl} - cfg.LCUrls = []url.URL{*lcurl} + cfg.ListenPeerUrls = []url.URL{*lpurl} + cfg.ListenClientUrls = []url.URL{*lcurl} e.ETCD, err = embed.StartEtcd(cfg) if err != nil { return err diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index d4e0d371e5b..00000000000 --- a/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. 
-package versioned diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_backuppolicytemplate.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_backuppolicytemplate.go index 007195219cc..bc12fb95324 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_backuppolicytemplate.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_backuppolicytemplate.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeBackupPolicyTemplates struct { Fake *FakeAppsV1alpha1 } -var backuppolicytemplatesResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "backuppolicytemplates"} +var backuppolicytemplatesResource = v1alpha1.SchemeGroupVersion.WithResource("backuppolicytemplates") -var backuppolicytemplatesKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "BackupPolicyTemplate"} +var backuppolicytemplatesKind = v1alpha1.SchemeGroupVersion.WithKind("BackupPolicyTemplate") // Get takes name of the backupPolicyTemplate, and returns the corresponding backupPolicyTemplate object, and an error if there is any. 
func (c *FakeBackupPolicyTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupPolicyTemplate, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_cluster.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_cluster.go index 468a18b4a76..737db9f313e 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_cluster.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_cluster.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeClusters struct { ns string } -var clustersResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "clusters"} +var clustersResource = v1alpha1.SchemeGroupVersion.WithResource("clusters") -var clustersKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "Cluster"} +var clustersKind = v1alpha1.SchemeGroupVersion.WithKind("Cluster") // Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. 
func (c *FakeClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterdefinition.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterdefinition.go index 0eb0185053d..f00872aaaa2 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterdefinition.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterdefinition.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeClusterDefinitions struct { Fake *FakeAppsV1alpha1 } -var clusterdefinitionsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "clusterdefinitions"} +var clusterdefinitionsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterdefinitions") -var clusterdefinitionsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ClusterDefinition"} +var clusterdefinitionsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterDefinition") // Get takes name of the clusterDefinition, and returns the corresponding clusterDefinition object, and an error if there is any. 
func (c *FakeClusterDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterDefinition, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterversion.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterversion.go index 83bcda7413a..bb34c76cf3d 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterversion.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_clusterversion.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeClusterVersions struct { Fake *FakeAppsV1alpha1 } -var clusterversionsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "clusterversions"} +var clusterversionsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterversions") -var clusterversionsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ClusterVersion"} +var clusterversionsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterVersion") // Get takes name of the clusterVersion, and returns the corresponding clusterVersion object, and an error if there is any. 
func (c *FakeClusterVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterVersion, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentclassdefinition.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentclassdefinition.go index f0a434593af..be08339e185 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentclassdefinition.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentclassdefinition.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeComponentClassDefinitions struct { Fake *FakeAppsV1alpha1 } -var componentclassdefinitionsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "componentclassdefinitions"} +var componentclassdefinitionsResource = v1alpha1.SchemeGroupVersion.WithResource("componentclassdefinitions") -var componentclassdefinitionsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ComponentClassDefinition"} +var componentclassdefinitionsKind = v1alpha1.SchemeGroupVersion.WithKind("ComponentClassDefinition") // Get takes name of the componentClassDefinition, and returns the corresponding componentClassDefinition object, and an error if there is any. 
func (c *FakeComponentClassDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ComponentClassDefinition, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentresourceconstraint.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentresourceconstraint.go index f90bd2bc090..3e4c24db277 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentresourceconstraint.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_componentresourceconstraint.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeComponentResourceConstraints struct { Fake *FakeAppsV1alpha1 } -var componentresourceconstraintsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "componentresourceconstraints"} +var componentresourceconstraintsResource = v1alpha1.SchemeGroupVersion.WithResource("componentresourceconstraints") -var componentresourceconstraintsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ComponentResourceConstraint"} +var componentresourceconstraintsKind = v1alpha1.SchemeGroupVersion.WithKind("ComponentResourceConstraint") // Get takes name of the componentResourceConstraint, and returns the corresponding componentResourceConstraint object, and an error if there is any. 
func (c *FakeComponentResourceConstraints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ComponentResourceConstraint, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_configconstraint.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_configconstraint.go index c6c55dda196..5ae91311f2c 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_configconstraint.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_configconstraint.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeConfigConstraints struct { Fake *FakeAppsV1alpha1 } -var configconstraintsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "configconstraints"} +var configconstraintsResource = v1alpha1.SchemeGroupVersion.WithResource("configconstraints") -var configconstraintsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ConfigConstraint"} +var configconstraintsKind = v1alpha1.SchemeGroupVersion.WithKind("ConfigConstraint") // Get takes name of the configConstraint, and returns the corresponding configConstraint object, and an error if there is any. 
func (c *FakeConfigConstraints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ConfigConstraint, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_opsrequest.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_opsrequest.go index c9b181c1e96..aeb5451ec86 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_opsrequest.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_opsrequest.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeOpsRequests struct { ns string } -var opsrequestsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "opsrequests"} +var opsrequestsResource = v1alpha1.SchemeGroupVersion.WithResource("opsrequests") -var opsrequestsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "OpsRequest"} +var opsrequestsKind = v1alpha1.SchemeGroupVersion.WithKind("OpsRequest") // Get takes name of the opsRequest, and returns the corresponding opsRequest object, and an error if there is any. 
func (c *FakeOpsRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.OpsRequest, err error) { diff --git a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_servicedescriptor.go b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_servicedescriptor.go index 1e0caf12bf8..b9bddc67d41 100644 --- a/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_servicedescriptor.go +++ b/pkg/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_servicedescriptor.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeServiceDescriptors struct { ns string } -var servicedescriptorsResource = schema.GroupVersionResource{Group: "apps.kubeblocks.io", Version: "v1alpha1", Resource: "servicedescriptors"} +var servicedescriptorsResource = v1alpha1.SchemeGroupVersion.WithResource("servicedescriptors") -var servicedescriptorsKind = schema.GroupVersionKind{Group: "apps.kubeblocks.io", Version: "v1alpha1", Kind: "ServiceDescriptor"} +var servicedescriptorsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceDescriptor") // Get takes name of the serviceDescriptor, and returns the corresponding serviceDescriptor object, and an error if there is any. 
func (c *FakeServiceDescriptors) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceDescriptor, err error) { diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backup.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backup.go index d0df0d9e38d..13b05e5e82c 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backup.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backup.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeBackups struct { ns string } -var backupsResource = schema.GroupVersionResource{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Resource: "backups"} +var backupsResource = v1alpha1.SchemeGroupVersion.WithResource("backups") -var backupsKind = schema.GroupVersionKind{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Kind: "Backup"} +var backupsKind = v1alpha1.SchemeGroupVersion.WithKind("Backup") // Get takes name of the backup, and returns the corresponding backup object, and an error if there is any. 
func (c *FakeBackups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Backup, err error) { diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuppolicy.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuppolicy.go index 75267050936..68b567006fa 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuppolicy.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuppolicy.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeBackupPolicies struct { ns string } -var backuppoliciesResource = schema.GroupVersionResource{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Resource: "backuppolicies"} +var backuppoliciesResource = v1alpha1.SchemeGroupVersion.WithResource("backuppolicies") -var backuppoliciesKind = schema.GroupVersionKind{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Kind: "BackupPolicy"} +var backuppoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("BackupPolicy") // Get takes name of the backupPolicy, and returns the corresponding backupPolicy object, and an error if there is any. 
func (c *FakeBackupPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupPolicy, err error) { diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuprepo.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuprepo.go index 6d71ea05822..37f77268ca2 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuprepo.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuprepo.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeBackupRepos struct { Fake *FakeDataprotectionV1alpha1 } -var backupreposResource = schema.GroupVersionResource{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Resource: "backuprepos"} +var backupreposResource = v1alpha1.SchemeGroupVersion.WithResource("backuprepos") -var backupreposKind = schema.GroupVersionKind{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Kind: "BackupRepo"} +var backupreposKind = v1alpha1.SchemeGroupVersion.WithKind("BackupRepo") // Get takes name of the backupRepo, and returns the corresponding backupRepo object, and an error if there is any. 
func (c *FakeBackupRepos) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupRepo, err error) { diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go index 668f8def63d..8ef14eefcf3 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeBackupTools struct { Fake *FakeDataprotectionV1alpha1 } -var backuptoolsResource = schema.GroupVersionResource{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Resource: "backuptools"} +var backuptoolsResource = v1alpha1.SchemeGroupVersion.WithResource("backuptools") -var backuptoolsKind = schema.GroupVersionKind{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Kind: "BackupTool"} +var backuptoolsKind = v1alpha1.SchemeGroupVersion.WithKind("BackupTool") // Get takes name of the backupTool, and returns the corresponding backupTool object, and an error if there is any. 
func (c *FakeBackupTools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupTool, err error) { diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go index 31d60fbde83..64e6993b258 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeRestoreJobs struct { ns string } -var restorejobsResource = schema.GroupVersionResource{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Resource: "restorejobs"} +var restorejobsResource = v1alpha1.SchemeGroupVersion.WithResource("restorejobs") -var restorejobsKind = schema.GroupVersionKind{Group: "dataprotection.kubeblocks.io", Version: "v1alpha1", Kind: "RestoreJob"} +var restorejobsKind = v1alpha1.SchemeGroupVersion.WithKind("RestoreJob") // Get takes name of the restoreJob, and returns the corresponding restoreJob object, and an error if there is any. 
func (c *FakeRestoreJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RestoreJob, err error) { diff --git a/pkg/client/clientset/versioned/typed/extensions/v1alpha1/fake/fake_addon.go b/pkg/client/clientset/versioned/typed/extensions/v1alpha1/fake/fake_addon.go index 866ad303948..c7368d9fa9f 100644 --- a/pkg/client/clientset/versioned/typed/extensions/v1alpha1/fake/fake_addon.go +++ b/pkg/client/clientset/versioned/typed/extensions/v1alpha1/fake/fake_addon.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeAddons struct { Fake *FakeExtensionsV1alpha1 } -var addonsResource = schema.GroupVersionResource{Group: "extensions.kubeblocks.io", Version: "v1alpha1", Resource: "addons"} +var addonsResource = v1alpha1.SchemeGroupVersion.WithResource("addons") -var addonsKind = schema.GroupVersionKind{Group: "extensions.kubeblocks.io", Version: "v1alpha1", Kind: "Addon"} +var addonsKind = v1alpha1.SchemeGroupVersion.WithKind("Addon") // Get takes name of the addon, and returns the corresponding addon object, and an error if there is any. 
func (c *FakeAddons) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Addon, err error) { diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageprovider.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageprovider.go index 325a724d114..fb2d05dbee0 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageprovider.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageprovider.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeStorageProviders struct { Fake *FakeStorageV1alpha1 } -var storageprovidersResource = schema.GroupVersionResource{Group: "storage.kubeblocks.io", Version: "v1alpha1", Resource: "storageproviders"} +var storageprovidersResource = v1alpha1.SchemeGroupVersion.WithResource("storageproviders") -var storageprovidersKind = schema.GroupVersionKind{Group: "storage.kubeblocks.io", Version: "v1alpha1", Kind: "StorageProvider"} +var storageprovidersKind = v1alpha1.SchemeGroupVersion.WithKind("StorageProvider") // Get takes name of the storageProvider, and returns the corresponding storageProvider object, and an error if there is any. 
func (c *FakeStorageProviders) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageProvider, err error) { diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go index 69b962e7026..fb5fded7d03 100644 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeReplicatedStateMachines struct { ns string } -var replicatedstatemachinesResource = schema.GroupVersionResource{Group: "workloads", Version: "v1alpha1", Resource: "replicatedstatemachines"} +var replicatedstatemachinesResource = v1alpha1.SchemeGroupVersion.WithResource("replicatedstatemachines") -var replicatedstatemachinesKind = schema.GroupVersionKind{Group: "workloads", Version: "v1alpha1", Kind: "ReplicatedStateMachine"} +var replicatedstatemachinesKind = v1alpha1.SchemeGroupVersion.WithKind("ReplicatedStateMachine") // Get takes name of the replicatedStateMachine, and returns the corresponding replicatedStateMachine object, and an error if there is any. 
func (c *FakeReplicatedStateMachines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index 4abf200d5f3..39d453b4acd 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -170,7 +170,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -243,7 +243,7 @@ type SharedInformerFactory interface { // ForResource gives generic access to a shared informer of the matching type. ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // InformerFor returns the SharedIndexInformer for obj using an internal // client. 
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer From d4889b331f894178d0a1a5ebb2811466223c9501 Mon Sep 17 00:00:00 2001 From: Wei Cao Date: Wed, 27 Sep 2023 16:22:07 +0800 Subject: [PATCH 49/58] chore: bump go from 1.20.5 to 1.21 --- .github/workflows/cicd-pull-request.yml | 4 ++-- .github/workflows/cicd-push.yml | 6 +++--- .github/workflows/codeql.yml | 2 +- .github/workflows/release-helm-chart.yml | 2 +- .github/workflows/release-image.yml | 4 ++-- .github/workflows/release-publish.yml | 2 +- .goreleaser.yaml | 2 +- docker/Dockerfile | 2 +- docker/Dockerfile-dataprotection | 2 +- docker/Dockerfile-dev | 2 +- docker/Dockerfile-probe | 2 +- docker/Dockerfile-tools | 2 +- 12 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/cicd-pull-request.yml b/.github/workflows/cicd-pull-request.yml index be9746019b9..65d981cd6ca 100644 --- a/.github/workflows/cicd-pull-request.yml +++ b/.github/workflows/cicd-pull-request.yml @@ -146,7 +146,7 @@ jobs: with: MAKE_OPS_PRE: "generate" IMG: "apecloud/kubeblocks" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" DOCKERFILE_PATH: "./docker/Dockerfile" secrets: inherit @@ -158,7 +158,7 @@ jobs: with: MAKE_OPS_PRE: "generate test-go-generate" IMG: "apecloud/kubeblocks-tools" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit diff --git a/.github/workflows/cicd-push.yml b/.github/workflows/cicd-push.yml index 68bce4811b5..86136657854 100644 --- a/.github/workflows/cicd-push.yml +++ b/.github/workflows/cicd-push.yml @@ -11,7 +11,7 @@ on: env: GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} BASE_BRANCH: origin/main - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" jobs: trigger-mode: @@ -195,7 +195,7 @@ jobs: with: MAKE_OPS_PRE: "generate" IMG: "apecloud/kubeblocks" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" 
DOCKERFILE_PATH: "./docker/Dockerfile" secrets: inherit @@ -207,7 +207,7 @@ jobs: with: MAKE_OPS_PRE: "generate test-go-generate" IMG: "apecloud/kubeblocks-tools" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 31a0c296350..1b38f2a4bfb 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -8,7 +8,7 @@ on: env: BASE_BRANCH: origin/main - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" jobs: trigger-mode: diff --git a/.github/workflows/release-helm-chart.yml b/.github/workflows/release-helm-chart.yml index e31c0e2d26d..e46fb800de0 100644 --- a/.github/workflows/release-helm-chart.yml +++ b/.github/workflows/release-helm-chart.yml @@ -54,7 +54,7 @@ jobs: MAKE_OPS_PRE: "helm-package VERSION=${{ needs.chart-version.outputs.chart-version-bump }}" IMG: "apecloud/kubeblocks-charts" VERSION: "${{ needs.chart-version.outputs.chart-version }}" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" APECD_REF: "v0.1.24" DOCKERFILE_PATH: "./docker/Dockerfile-charts" secrets: inherit diff --git a/.github/workflows/release-image.yml b/.github/workflows/release-image.yml index 4e7bf9d298a..eab510cd9ff 100644 --- a/.github/workflows/release-image.yml +++ b/.github/workflows/release-image.yml @@ -45,7 +45,7 @@ jobs: MAKE_OPS_PRE: "generate" IMG: "apecloud/kubeblocks" VERSION: "${{ needs.image-tag.outputs.tag-name }}" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" APECD_REF: "v0.1.24" DOCKERFILE_PATH: "./docker/Dockerfile" secrets: inherit @@ -57,7 +57,7 @@ jobs: MAKE_OPS_PRE: "generate test-go-generate" IMG: "apecloud/kubeblocks-tools" VERSION: "${{ needs.image-tag.outputs.tag-name }}" - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" APECD_REF: "v0.1.24" DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 
6673ad9997f..4fe291849de 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -9,7 +9,7 @@ env: GH_TOKEN: ${{ github.token }} GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} TAG_NAME: ${{ github.ref_name }} - GO_VERSION: "1.20.5" + GO_VERSION: "1.21" CLI_NAME: 'kbcli' CLI_REPO: 'apecloud/kbcli' GITLAB_KBCLI_PROJECT_ID: 85948 diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 2fa8e063069..696d3c0bb3c 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -28,7 +28,7 @@ builds: - amd64 - arm64 env: - - ASSUME_NO_MOVING_GC_UNSAFE_RISK_IT_WITH=go1.20 + - ASSUME_NO_MOVING_GC_UNSAFE_RISK_IT_WITH=go1.21 - CGO_ENABLED=0 tags: - containers_image_openpgp diff --git a/docker/Dockerfile b/docker/Dockerfile index 284aecb43e4..dc92fe13020 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ # Build the manager binary ARG DIST_IMG=gcr.io/distroless/static:nonroot -ARG GO_VERSION=1.20 +ARG GO_VERSION=1.21 FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} as builder diff --git a/docker/Dockerfile-dataprotection b/docker/Dockerfile-dataprotection index d06208bfa3d..d571c22f008 100644 --- a/docker/Dockerfile-dataprotection +++ b/docker/Dockerfile-dataprotection @@ -1,7 +1,7 @@ # Build the dataprotection binary ARG DIST_IMG=gcr.io/distroless/static:nonroot -ARG GO_VERSION=1.20 +ARG GO_VERSION=1.21 FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} as builder diff --git a/docker/Dockerfile-dev b/docker/Dockerfile-dev index 8ed81f28efe..b4dd8dda53d 100644 --- a/docker/Dockerfile-dev +++ b/docker/Dockerfile-dev @@ -1,7 +1,7 @@ # Based on https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/go/.devcontainer/base.Dockerfile # [Choice] Go version: 1, 1.19, 1.18, etc -ARG GOVERSION=1.20 +ARG GOVERSION=1.21 FROM golang:${GOVERSION}-bullseye # Copy library scripts to execute diff --git a/docker/Dockerfile-probe b/docker/Dockerfile-probe index 1e530e20f36..56dffc56e35 100644 --- 
a/docker/Dockerfile-probe +++ b/docker/Dockerfile-probe @@ -11,7 +11,7 @@ #TARGETARCH - Architecture from --platform, e.g. arm64 #TARGETVARIANT - used to set target ARM variant, e.g. v7 -ARG GO_VERSION=1.20 +ARG GO_VERSION=1.21 FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} as builder ARG TARGETOS diff --git a/docker/Dockerfile-tools b/docker/Dockerfile-tools index 2184145d54b..7110809899b 100644 --- a/docker/Dockerfile-tools +++ b/docker/Dockerfile-tools @@ -11,7 +11,7 @@ #TARGETARCH - Architecture from --platform, e.g. arm64 #TARGETVARIANT - used to set target ARM variant, e.g. v7 -ARG GO_VERSION=1.20 +ARG GO_VERSION=1.21 FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} as builder ARG TARGETOS From 45395dfb26e4d1963c2601931ad895a064b6625b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 09:39:32 +0800 Subject: [PATCH 50/58] chore(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#5053) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 24 ++--- go.sum | 304 ++++++++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 278 insertions(+), 50 deletions(-) diff --git a/go.mod b/go.mod index c48b0cc0ded..964d117b144 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/jackc/pgx/v5 v5.4.3 github.com/jedib0t/go-pretty/v6 v6.4.6 github.com/json-iterator/go v1.1.12 - github.com/k3d-io/k3d/v5 v5.5.2 + github.com/k3d-io/k3d/v5 v5.6.0 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 github.com/kubesphere/kubekey/v3 v3.0.7 @@ -60,14 +60,13 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.8 - github.com/opencontainers/image-spec v1.1.0-rc5 github.com/pashagolub/pgxmock/v2 v2.11.0 
github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/prometheus-community/pro-bing v0.3.0 github.com/redis/go-redis/v9 v9.0.5 github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851 - github.com/replicatedhq/troubleshoot v0.57.0 + github.com/replicatedhq/troubleshoot v0.10.1 github.com/robfig/cron/v3 v3.0.1 github.com/russross/blackfriday/v2 v2.1.0 github.com/sahilm/fuzzy v0.1.0 @@ -139,16 +138,16 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/acomagu/bufpipe v1.0.4 // indirect - github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bhmj/xpression v0.9.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/chzyer/readline v1.5.1 // indirect @@ -164,7 +163,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect - github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect @@ -172,7 
+171,6 @@ require ( github.com/deislabs/oras v0.9.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 // indirect github.com/docker/cli v24.0.6+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect @@ -190,7 +188,6 @@ require ( github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.4.1 // indirect @@ -212,7 +209,6 @@ require ( github.com/go-test/deep v1.1.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -229,7 +225,6 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.9.1 // indirect - github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect @@ -276,7 +271,6 @@ require ( github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lithammer/dedent v1.1.0 // indirect - github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e // indirect github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -310,6 +304,7 @@ require ( github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/selinux v1.11.0 // indirect @@ -333,6 +328,7 @@ require ( github.com/rubenv/sql-migrate v1.3.1 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sergi/go-diff v1.2.0 // indirect + github.com/shirou/gopsutil v3.21.1+incompatible // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sigstore/fulcio v1.3.1 // indirect @@ -350,7 +346,6 @@ require ( github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/tj/go-spin v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect @@ -387,8 +382,7 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect + go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/term v0.12.0 // indirect @@ -407,11 +401,9 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect - inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect k8s.io/apiserver v0.28.1 // indirect k8s.io/component-helpers v0.28.2 // indirect oras.land/oras-go v1.2.4 // indirect - periph.io/x/host/v3 v3.8.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/go.sum b/go.sum index 17eaec0d452..5a9aa97dea2 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= @@ -396,7 +398,12 @@ cuelang.org/go v0.6.0 h1:dJhgKCog+FEZt7OwAYV1R+o/RZPmE8aqFoptmxSWyr8= cuelang.org/go v0.6.0/go.mod h1:9CxOX8aawrr3BgSdqPj7V0RYoXo7XIb+yDFC6uESrOQ= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod 
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= @@ -468,6 +475,7 @@ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0g github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 h1:JMDGhoQvXNTqH6Y3MC0IUw6tcZvaUdujNqzK2HYWZc8= github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -476,6 +484,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/O github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod 
h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/StudioSol/set v1.0.0 h1:G27J71la+Da08WidabBkoRrvPLTa4cdCn0RjvyJ5WKQ= github.com/StudioSol/set v1.0.0/go.mod h1:hIUNZPo6rEGF43RlPXHq7Fjmf+HkVJBqAjtK7Z9LoIU= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -485,12 +495,15 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 h1:vOVO0ypMfTt6tZacyI0kp+iCZb1XSNiYDqnzBWYgfe4= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412/go.mod h1:AI9hp1tkp10pAlK5TCwL+7yWbRgtDm9jhToq6qij2xs= +github.com/alecthomas/gometalinter v2.0.11+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex 
v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -505,11 +518,15 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/authzed/controller-idioms v0.7.0 h1:HhNMUBb8hJzYqY3mhen3B2AC5nsIem3fBe0tC/AAOHo= github.com/authzed/controller-idioms v0.7.0/go.mod h1:0B/PmqCguKv8b3azSMF+HdyKpKr2o3UAZ5eo12Ze8Fo= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.25.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.257 h1:HwelXYZZ8c34uFFhgVw3ybu2gB5fkk8KLj2idTvzZb8= github.com/aws/aws-sdk-go v1.44.257/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= @@ -532,9 +549,13 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU github.com/bitly/go-simplejson v0.5.0/go.mod 
h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= @@ -548,8 +569,6 @@ github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= -github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f h1:tRk+aBit+q3oqnj/1mF5HHhP2yxJM2lSa0afOJxQ3nE= -github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -566,6 +585,7 @@ github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad/go.mod h github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chmduquesne/rollinghash v4.0.0+incompatible h1:hnREQO+DXjqIw3rUTzWN7/+Dpw+N5Um8zpKV0JOEgbo= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.1.11-0.20160617073814-96a4d311aa9b/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -577,6 +597,7 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/clbanning/mxj/v2 v2.5.7 h1:7q5lvUpaPF/WOkqgIDiwjBJaznaLCCBd78pi8ZyAnE0= github.com/clbanning/mxj/v2 v2.5.7/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= @@ -591,6 +612,7 @@ github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v3 v3.2.0 
h1:79kHCn4tO0VGu3W0WujYrMjBDk8a2H4KEUYcXf7whcg= github.com/cockroachdb/apd/v3 v3.2.0/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= @@ -622,27 +644,35 @@ github.com/containers/storage v1.48.1/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCm github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 
v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin 
v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapr/kit v0.11.3 h1:u1X92tE8xsrwXIej7nkcI5Z1t1CFznPwlL18tizNEw4= @@ -667,7 +697,6 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= -github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= @@ -686,20 +715,22 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust 
v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= @@ -707,8 +738,11 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy 
v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/proto v1.10.0 h1:pDGyFRVV5RvV+nkBK9iy3q67FBy9Xa7vwrOTE+g5aGw= @@ -723,6 +757,8 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/estesp/manifest-tool/v2 v2.0.3 h1:F9HMOqcXvtW+8drQB+BjNRU/+bLXOwCfj3mbjqQC2Ns= github.com/estesp/manifest-tool/v2 v2.0.3/go.mod h1:Suh+tbKQvKHcs4Vltzy8gwZk1y9eSRI635gT4gFw5Ss= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -740,9 +776,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 
h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= @@ -757,8 +792,11 @@ github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -788,48 +826,80 @@ github.com/go-logr/logr v1.2.4 
h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod 
h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod 
h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -837,14 +907,19 @@ github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/e github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-quicktest/qt v1.100.0 h1:I7iSLgIwNp0E0UnSvKJzs7ig0jg/Iq83zsZjtQNW7jY= +github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redismock/v9 v9.0.3 h1:mtHQi2l51lCmXIbTRTqb1EiHYe9tL5Yk5oorlSJJqR0= github.com/go-redis/redismock/v9 v9.0.3/go.mod h1:F6tJRfnU8R/NZ0E+Gjvoluk14MqMC5ueSZX6vVQypc0= github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -889,8 +964,6 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= @@ -908,12 +981,14 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -923,6 +998,7 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -981,8 +1057,10 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -1012,8 +1090,10 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHa github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod 
h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1024,6 +1104,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1036,17 +1118,22 @@ github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMd github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.9.1 h1:DpTpJqzZ3NvX9zqjhIuI1oVzYZMvboZe+3LoeEIJjHM= github.com/googleapis/gax-go/v2 v2.9.1/go.mod 
h1:4FG3gMrVZlyMp5itSYKMU9z/lBE7+SbnUOvzH2HqbEY= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ 
-1056,12 +1143,15 @@ github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -1077,9 +1167,11 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ= github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= @@ -1099,6 +1191,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= @@ -1131,6 +1224,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= 
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo= @@ -1162,6 +1257,7 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw= github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= @@ -1169,6 +1265,8 @@ github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8c github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc= github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -1185,6 +1283,7 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -1193,10 +1292,11 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k3d-io/k3d/v5 v5.5.2 h1:VEkopEqTUBpGJghjltWqv1jI57MLKFaxWt2yBp2lZmE= -github.com/k3d-io/k3d/v5 v5.5.2/go.mod h1:PA0IkO8CB2OsBpBO3rJwskmA69Ibb9qdFiUGE/8IqUA= +github.com/k3d-io/k3d/v5 v5.6.0 h1:XMRSQXyPErOcDCdOJVi6HUPjJZuWd/N6Dss7QeCDRhk= 
+github.com/k3d-io/k3d/v5 v5.6.0/go.mod h1:t/hRD2heCSkO9TJJdzFT72jXGCY8PjsCsClgjcmMoAA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1231,6 +1331,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -1263,6 +1364,7 @@ github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -1270,11 +1372,9 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= 
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e h1:hz4quJkaJWDo+xW+G6wTF6d6/95QvJ+o2D0+bB/tJ1U= -github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY= -github.com/longhorn/nsfilelock v0.0.0-20200723175406-fa7c83ad0003/go.mod h1:0CLeXlf59Lg6C0kjLSDf47ft73Dh37CwymYRKWwAn04= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1284,12 +1384,15 @@ github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPK github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.3.2/go.mod h1:8JU+igZ+eeiiRku4T5BjtKh2ms8sziGpSYl1gN8Bazw= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= @@ -1311,6 +1414,7 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -1335,6 +1439,7 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/microcosm-cc/bluemonday 
v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1357,6 +1462,7 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -1413,8 +1519,11 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/nicksnyder/go-i18n v1.10.1/go.mod 
h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= @@ -1422,6 +1531,7 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1434,6 +1544,7 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= @@ -1456,11 +1567,13 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL 
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pashagolub/pgxmock/v2 v2.11.0 h1:ZUKqZy5Zf/5WJjAXHErjHngJBW5/3fEujGD+Cb0FuDI= github.com/pashagolub/pgxmock/v2 v2.11.0/go.mod h1:D3YslkN/nJ4+umVqWmbwfSXugJIjPMChkGBG47OJpNw= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1492,11 +1605,13 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod 
h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEtqEfNAPp47RUON4= github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1513,6 +1628,7 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1521,11 +1637,13 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs 
v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1540,8 +1658,8 @@ github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851 h1:eRlNDHxGfVkPCRXbA4BfQJvt5DHjFiTtWy3R/t4djyY= github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851/go.mod h1:JDxG6+uubnk9/BZ2yUsyAJJwlptjrnmB2MPF5d2Xe/8= -github.com/replicatedhq/troubleshoot v0.57.0 h1:m9B31Mhgiz4Lwz+W4RvFkqhfYZLCwAqRPUwiwmSAAps= -github.com/replicatedhq/troubleshoot v0.57.0/go.mod h1:R5VdixzaBXfWLbP9mcLuZKs/bDCyGGS4+vFtKGWs9xE= +github.com/replicatedhq/troubleshoot v0.10.1 h1:rTSwTcd68aKXSMurLTGgC3N2MwK+pQKEwE/8P4gxGuU= +github.com/replicatedhq/troubleshoot v0.10.1/go.mod h1:yEOBGdG/MqkN3vRJe1TIa7doW2jVHEehU7Mngjf1Lk4= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod 
h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -1551,6 +1669,7 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1561,6 +1680,7 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA= github.com/rubenv/sql-migrate v1.3.1/go.mod h1:YzG/Vh82CwyhTFXy+Mf5ahAiiEOpAlHurg+23VEzcsk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1571,13 +1691,17 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sebdah/goldie v1.0.0 h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sebdah/goldie/v2 v2.5.3 
h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/segmentio/ksuid v1.0.3/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= +github.com/shirou/gopsutil v3.21.1+incompatible h1:2LwXWdbjXwyDgq26Yy/OT4xozlpmssQfy/rtfhWb0bY= +github.com/shirou/gopsutil v3.21.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08= github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -1587,7 +1711,29 @@ github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnj github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod 
h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= 
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y= github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU= github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI= @@ -1596,7 +1742,6 @@ github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXx github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1616,6 +1761,8 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= 
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1628,6 +1775,7 @@ github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= @@ -1640,10 +1788,12 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod 
h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= @@ -1681,6 +1831,7 @@ github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= @@ -1691,25 +1842,31 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 
h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9/go.mod h1:q+QjxYvZ+fpjMXqs+XEriussHjSYqeXVnAdSV1tkMYk= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo= github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc= 
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= @@ -1764,8 +1921,10 @@ github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeW github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= @@ -1783,6 +1942,9 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= 
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -1791,6 +1953,7 @@ go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqbly go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1822,6 +1985,7 @@ go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1844,24 +2008,27 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 
h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= -go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 h1:zLxFnORHDFTSkJPawMU7LzsuGQJ4MUFS653jJHpORow= +go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28/go.mod h1:TQvodOM+hJTioNQJilmLXu08JNb8i+ccq418+KWu1/Y= golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1898,6 +2065,8 @@ golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb h1:xIApU0ow1zwMa2uL1VDNeQlNV golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint 
v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1927,16 +2096,21 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1946,6 +2120,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2001,6 +2176,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2030,6 +2207,7 @@ golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2048,29 +2226,36 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2169,6 +2354,7 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2185,6 +2371,7 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2194,9 +2381,13 @@ golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181122213734-04b5d21e00f1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2211,11 +2402,14 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2278,8 +2472,12 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNq golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2335,6 +2533,8 @@ google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4q google.golang.org/api v0.124.0 h1:dP6Ef1VgOGqQ8eiv4GiY8RhmeyqzovcXBYPDUYG8Syo= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2343,6 +2543,10 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2482,17 +2686,18 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= 
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -2505,11 +2710,13 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= 
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -2537,8 +2744,10 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= helm.sh/helm/v3 v3.12.3 h1:5y1+Sbty12t48T/t/CGNYUIME5BJ0WKfmW/sobYqkFg= helm.sh/helm/v3 v3.12.3/go.mod h1:KPKQiX9IP5HX7o5YnnhViMnNuKiL/lJBVQ47GHe1R0k= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2546,36 +2755,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= +k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= +k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw= k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs= +k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= +k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM= k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w= +k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= k8s.io/cli-runtime v0.28.2 
h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= +k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.28.2 h1:u47guga1rCWLnEnffF09p+cqj8B20oHOLoQ1lb1HGtQ= k8s.io/code-generator v0.28.2/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A= +k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= +k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q= k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -2583,6 +2811,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.19.12 h1:OwyNUe/7/gxzEnaLd3sC9Yrpx0fZAERzvFslX5Qq5g8= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d h1:/CFeJBjBrZvHX09rObS2+2iEEDevMWYc1v3aIYAjIYI= k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= @@ -2592,28 +2822,32 @@ k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas= k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8= k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= -periph.io/x/host/v3 v3.8.0 h1:T5ojZ2wvnZHGPS4h95N2ZpcCyHnsvH3YRZ1UUUiv5CQ= -periph.io/x/host/v3 v3.8.0/go.mod h1:rzOLH+2g9bhc6pWZrkCrmytD4igwQ2vxFw6Wn6ZOlLY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/controller-runtime v0.5.1-0.20200402191424-df180accb901/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= sigs.k8s.io/controller-runtime v0.15.2 h1:9V7b7SDQSJ08IIsJ6CY1CE85Okhp87dyTMNDG0FS7f4= sigs.k8s.io/controller-runtime v0.15.2/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 
h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= @@ -2621,3 +2855,5 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From c6ccbab1039be727a22aed5c53d80c45420f04c3 Mon Sep 17 00:00:00 2001 From: yijing Date: Thu, 28 Sep 2023 10:57:07 +0800 Subject: [PATCH 51/58] chore: Revert 'chore(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4' (#5301) --- go.mod | 24 +++-- go.sum | 304 +++++++-------------------------------------------------- 2 files changed, 50 insertions(+), 278 deletions(-) diff --git a/go.mod b/go.mod index 964d117b144..c48b0cc0ded 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/jackc/pgx/v5 v5.4.3 github.com/jedib0t/go-pretty/v6 v6.4.6 
github.com/json-iterator/go v1.1.12 - github.com/k3d-io/k3d/v5 v5.6.0 + github.com/k3d-io/k3d/v5 v5.5.2 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 github.com/kubesphere/kubekey/v3 v3.0.7 @@ -60,13 +60,14 @@ require ( github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.8 + github.com/opencontainers/image-spec v1.1.0-rc5 github.com/pashagolub/pgxmock/v2 v2.11.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/prometheus-community/pro-bing v0.3.0 github.com/redis/go-redis/v9 v9.0.5 github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851 - github.com/replicatedhq/troubleshoot v0.10.1 + github.com/replicatedhq/troubleshoot v0.57.0 github.com/robfig/cron/v3 v3.0.1 github.com/russross/blackfriday/v2 v2.1.0 github.com/sahilm/fuzzy v0.1.0 @@ -138,16 +139,16 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 // indirect - github.com/StackExchange/wmi v1.2.1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/acomagu/bufpipe v1.0.4 // indirect + github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bhmj/xpression v0.9.1 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect 
github.com/chzyer/readline v1.5.1 // indirect @@ -163,7 +164,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect @@ -171,6 +172,7 @@ require ( github.com/deislabs/oras v0.9.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 // indirect github.com/docker/cli v24.0.6+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect @@ -188,6 +190,7 @@ require ( github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.4.1 // indirect @@ -209,6 +212,7 @@ require ( github.com/go-test/deep v1.1.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -225,6 +229,7 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect 
github.com/googleapis/gax-go/v2 v2.9.1 // indirect + github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect @@ -271,6 +276,7 @@ require ( github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lithammer/dedent v1.1.0 // indirect + github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -304,7 +310,6 @@ require ( github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/selinux v1.11.0 // indirect @@ -328,7 +333,6 @@ require ( github.com/rubenv/sql-migrate v1.3.1 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sergi/go-diff v1.2.0 // indirect - github.com/shirou/gopsutil v3.21.1+incompatible // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sigstore/fulcio v1.3.1 // indirect @@ -346,6 +350,7 @@ require ( github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/tj/go-spin v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect @@ -382,7 +387,8 @@ require ( go.starlark.net 
v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 // indirect + go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/term v0.12.0 // indirect @@ -401,9 +407,11 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect k8s.io/apiserver v0.28.1 // indirect k8s.io/component-helpers v0.28.2 // indirect oras.land/oras-go v1.2.4 // indirect + periph.io/x/host/v3 v3.8.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/go.sum b/go.sum index 5a9aa97dea2..17eaec0d452 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,5 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= @@ -398,12 +396,7 @@ cuelang.org/go v0.6.0 h1:dJhgKCog+FEZt7OwAYV1R+o/RZPmE8aqFoptmxSWyr8= cuelang.org/go v0.6.0/go.mod h1:9CxOX8aawrr3BgSdqPj7V0RYoXo7XIb+yDFC6uESrOQ= dario.cat/mergo v1.0.0 
h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= @@ -475,7 +468,6 @@ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0g github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 h1:JMDGhoQvXNTqH6Y3MC0IUw6tcZvaUdujNqzK2HYWZc8= github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -484,8 +476,6 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/O github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= -github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/StudioSol/set v1.0.0 h1:G27J71la+Da08WidabBkoRrvPLTa4cdCn0RjvyJ5WKQ= github.com/StudioSol/set v1.0.0/go.mod h1:hIUNZPo6rEGF43RlPXHq7Fjmf+HkVJBqAjtK7Z9LoIU= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -495,15 +485,12 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 h1:vOVO0ypMfTt6tZacyI0kp+iCZb1XSNiYDqnzBWYgfe4= github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412/go.mod h1:AI9hp1tkp10pAlK5TCwL+7yWbRgtDm9jhToq6qij2xs= -github.com/alecthomas/gometalinter v2.0.11+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -518,15 +505,11 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/authzed/controller-idioms v0.7.0 h1:HhNMUBb8hJzYqY3mhen3B2AC5nsIem3fBe0tC/AAOHo= github.com/authzed/controller-idioms v0.7.0/go.mod h1:0B/PmqCguKv8b3azSMF+HdyKpKr2o3UAZ5eo12Ze8Fo= 
-github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.25.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.257 h1:HwelXYZZ8c34uFFhgVw3ybu2gB5fkk8KLj2idTvzZb8= github.com/aws/aws-sdk-go v1.44.257/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= @@ -549,13 +532,9 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= @@ -569,6 +548,8 @@ github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3 github.com/bwesterb/go-ristretto v1.2.0/go.mod 
h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= +github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f h1:tRk+aBit+q3oqnj/1mF5HHhP2yxJM2lSa0afOJxQ3nE= +github.com/c9s/goprocinfo v0.0.0-20170724085704-0010a05ce49f/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -585,7 +566,6 @@ github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad/go.mod h github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chmduquesne/rollinghash v4.0.0+incompatible h1:hnREQO+DXjqIw3rUTzWN7/+Dpw+N5Um8zpKV0JOEgbo= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.1.11-0.20160617073814-96a4d311aa9b/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -597,7 +577,6 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/clbanning/mxj/v2 v2.5.7 h1:7q5lvUpaPF/WOkqgIDiwjBJaznaLCCBd78pi8ZyAnE0= github.com/clbanning/mxj/v2 v2.5.7/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= -github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= @@ -612,7 +591,6 @@ github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v3 v3.2.0 h1:79kHCn4tO0VGu3W0WujYrMjBDk8a2H4KEUYcXf7whcg= github.com/cockroachdb/apd/v3 v3.2.0/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= @@ -644,35 +622,27 @@ github.com/containers/storage v1.48.1/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCm github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 
h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 
h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapr/kit v0.11.3 h1:u1X92tE8xsrwXIej7nkcI5Z1t1CFznPwlL18tizNEw4= @@ -697,6 +667,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= @@ -715,22 +686,20 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics 
v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= 
github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= @@ -738,11 +707,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/proto v1.10.0 h1:pDGyFRVV5RvV+nkBK9iy3q67FBy9Xa7vwrOTE+g5aGw= @@ -757,8 +723,6 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/estesp/manifest-tool/v2 v2.0.3 h1:F9HMOqcXvtW+8drQB+BjNRU/+bLXOwCfj3mbjqQC2Ns= github.com/estesp/manifest-tool/v2 v2.0.3/go.mod h1:Suh+tbKQvKHcs4Vltzy8gwZk1y9eSRI635gT4gFw5Ss= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch 
v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -776,8 +740,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= @@ -792,11 +757,8 @@ github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod 
h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -826,80 +788,48 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= 
github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod 
h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec 
v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -907,19 +837,14 @@ github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/e github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-quicktest/qt v1.100.0 h1:I7iSLgIwNp0E0UnSvKJzs7ig0jg/Iq83zsZjtQNW7jY= -github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redismock/v9 v9.0.3 h1:mtHQi2l51lCmXIbTRTqb1EiHYe9tL5Yk5oorlSJJqR0= github.com/go-redis/redismock/v9 v9.0.3/go.mod h1:F6tJRfnU8R/NZ0E+Gjvoluk14MqMC5ueSZX6vVQypc0= github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod 
h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -964,6 +889,8 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= @@ -981,14 +908,12 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -998,7 +923,6 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1057,10 +981,8 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -1090,10 +1012,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHa github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1104,8 +1024,6 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1118,22 +1036,17 @@ github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMd github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.9.1 h1:DpTpJqzZ3NvX9zqjhIuI1oVzYZMvboZe+3LoeEIJjHM= github.com/googleapis/gax-go/v2 v2.9.1/go.mod h1:4FG3gMrVZlyMp5itSYKMU9z/lBE7+SbnUOvzH2HqbEY= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod 
h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -1143,15 +1056,12 @@ github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -1167,11 +1077,9 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ= github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= @@ -1191,7 +1099,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= 
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= @@ -1224,8 +1131,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo= @@ -1257,7 +1162,6 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw= github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= @@ -1265,8 +1169,6 @@ github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8c github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d 
h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc= github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -1283,7 +1185,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -1292,11 +1193,10 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod 
h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k3d-io/k3d/v5 v5.6.0 h1:XMRSQXyPErOcDCdOJVi6HUPjJZuWd/N6Dss7QeCDRhk= -github.com/k3d-io/k3d/v5 v5.6.0/go.mod h1:t/hRD2heCSkO9TJJdzFT72jXGCY8PjsCsClgjcmMoAA= +github.com/k3d-io/k3d/v5 v5.5.2 h1:VEkopEqTUBpGJghjltWqv1jI57MLKFaxWt2yBp2lZmE= +github.com/k3d-io/k3d/v5 v5.5.2/go.mod h1:PA0IkO8CB2OsBpBO3rJwskmA69Ibb9qdFiUGE/8IqUA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1331,7 +1231,6 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -1364,7 +1263,6 @@ github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -1372,9 +1270,11 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e h1:hz4quJkaJWDo+xW+G6wTF6d6/95QvJ+o2D0+bB/tJ1U= +github.com/longhorn/go-iscsi-helper v0.0.0-20210330030558-49a327fb024e/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY= +github.com/longhorn/nsfilelock v0.0.0-20200723175406-fa7c83ad0003/go.mod h1:0CLeXlf59Lg6C0kjLSDf47ft73Dh37CwymYRKWwAn04= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1384,15 +1284,12 @@ github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPK github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson 
v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/manifoldco/promptui v0.3.2/go.mod h1:8JU+igZ+eeiiRku4T5BjtKh2ms8sziGpSYl1gN8Bazw= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= @@ -1414,7 +1311,6 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ 
-1439,7 +1335,6 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1462,7 +1357,6 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -1519,11 +1413,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= 
-github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= -github.com/nicksnyder/go-i18n v1.10.1/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= @@ -1531,7 +1422,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1544,7 +1434,6 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= @@ -1567,13 +1456,11 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pashagolub/pgxmock/v2 v2.11.0 h1:ZUKqZy5Zf/5WJjAXHErjHngJBW5/3fEujGD+Cb0FuDI= github.com/pashagolub/pgxmock/v2 v2.11.0/go.mod h1:D3YslkN/nJ4+umVqWmbwfSXugJIjPMChkGBG47OJpNw= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1605,13 +1492,11 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= github.com/poy/onpar v1.1.2 
h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEtqEfNAPp47RUON4= github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1628,7 +1513,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ 
-1637,13 +1521,11 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1658,8 +1540,8 @@ github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851 h1:eRlNDHxGfVkPCRXbA4BfQJvt5DHjFiTtWy3R/t4djyY= github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851/go.mod h1:JDxG6+uubnk9/BZ2yUsyAJJwlptjrnmB2MPF5d2Xe/8= -github.com/replicatedhq/troubleshoot v0.10.1 h1:rTSwTcd68aKXSMurLTGgC3N2MwK+pQKEwE/8P4gxGuU= -github.com/replicatedhq/troubleshoot 
v0.10.1/go.mod h1:yEOBGdG/MqkN3vRJe1TIa7doW2jVHEehU7Mngjf1Lk4= +github.com/replicatedhq/troubleshoot v0.57.0 h1:m9B31Mhgiz4Lwz+W4RvFkqhfYZLCwAqRPUwiwmSAAps= +github.com/replicatedhq/troubleshoot v0.57.0/go.mod h1:R5VdixzaBXfWLbP9mcLuZKs/bDCyGGS4+vFtKGWs9xE= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -1669,7 +1551,6 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1680,7 +1561,6 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA= github.com/rubenv/sql-migrate v1.3.1/go.mod h1:YzG/Vh82CwyhTFXy+Mf5ahAiiEOpAlHurg+23VEzcsk= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 
h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1691,17 +1571,13 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sebdah/goldie v1.0.0 h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/segmentio/ksuid v1.0.3/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= -github.com/shirou/gopsutil v3.21.1+incompatible h1:2LwXWdbjXwyDgq26Yy/OT4xozlpmssQfy/rtfhWb0bY= -github.com/shirou/gopsutil v3.21.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08= github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -1711,29 +1587,7 @@ github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnj github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 
h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp 
v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y= github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU= github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI= @@ -1742,6 +1596,7 @@ github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXx github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1761,8 +1616,6 @@ github.com/smartystreets/goconvey 
v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1775,7 +1628,6 @@ github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= @@ -1788,12 +1640,10 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= @@ -1831,7 +1681,6 @@ github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= @@ -1842,31 +1691,25 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod 
h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9/go.mod h1:q+QjxYvZ+fpjMXqs+XEriussHjSYqeXVnAdSV1tkMYk= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo= github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= @@ -1921,10 +1764,8 @@ github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeW github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= @@ -1942,9 +1783,6 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= 
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -1953,7 +1791,6 @@ go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqbly go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1985,7 +1822,6 @@ go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -2008,27 +1844,24 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 h1:zLxFnORHDFTSkJPawMU7LzsuGQJ4MUFS653jJHpORow= -go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28/go.mod h1:TQvodOM+hJTioNQJilmLXu08JNb8i+ccq418+KWu1/Y= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= +go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2065,8 +1898,6 @@ 
golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb h1:xIApU0ow1zwMa2uL1VDNeQlNV golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -2096,21 +1927,16 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2120,7 +1946,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2176,8 +2001,6 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2207,7 +2030,6 @@ golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2226,36 +2048,29 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 
h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2354,7 +2169,6 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= 
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2371,7 +2185,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2381,13 +2194,9 @@ golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181122213734-04b5d21e00f1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2402,14 +2211,11 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2472,12 +2278,8 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNq golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2533,8 +2335,6 @@ google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4q google.golang.org/api v0.124.0 h1:dP6Ef1VgOGqQ8eiv4GiY8RhmeyqzovcXBYPDUYG8Syo= google.golang.org/api v0.124.0/go.mod 
h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2543,10 +2343,6 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2686,18 +2482,17 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20191105091915-95d230a53780/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -2710,13 +2505,11 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -2744,10 +2537,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= helm.sh/helm/v3 v3.12.3 h1:5y1+Sbty12t48T/t/CGNYUIME5BJ0WKfmW/sobYqkFg= helm.sh/helm/v3 v3.12.3/go.mod h1:KPKQiX9IP5HX7o5YnnhViMnNuKiL/lJBVQ47GHe1R0k= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2755,55 +2546,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= +inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= -k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw= k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs= -k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= k8s.io/apimachinery v0.28.2/go.mod 
h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= -k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM= k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w= -k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= -k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= -k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.28.2 h1:u47guga1rCWLnEnffF09p+cqj8B20oHOLoQ1lb1HGtQ= k8s.io/code-generator v0.28.2/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A= -k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= -k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q= 
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -2811,8 +2583,6 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.19.12 h1:OwyNUe/7/gxzEnaLd3sC9Yrpx0fZAERzvFslX5Qq5g8= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d h1:/CFeJBjBrZvHX09rObS2+2iEEDevMWYc1v3aIYAjIYI= k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= @@ -2822,32 +2592,28 @@ 
k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas= k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8= k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= +periph.io/x/host/v3 v3.8.0 h1:T5ojZ2wvnZHGPS4h95N2ZpcCyHnsvH3YRZ1UUUiv5CQ= +periph.io/x/host/v3 v3.8.0/go.mod h1:rzOLH+2g9bhc6pWZrkCrmytD4igwQ2vxFw6Wn6ZOlLY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime v0.5.1-0.20200402191424-df180accb901/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= sigs.k8s.io/controller-runtime v0.15.2 h1:9V7b7SDQSJ08IIsJ6CY1CE85Okhp87dyTMNDG0FS7f4= sigs.k8s.io/controller-runtime v0.15.2/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= @@ -2855,5 +2621,3 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From f7c18e0c0c397603ed772727203792bee362b47a Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Thu, 28 Sep 2023 11:49:14 +0800 
Subject: [PATCH 52/58] feat: improve and refactor backup and restore (#4917) Signed-off-by: L.Dongming Co-authored-by: wangyelei --- PROJECT | 34 +- .../v1alpha1/backuppolicytemplate_types.go | 159 +- apis/apps/v1alpha1/cluster_types.go | 33 +- apis/apps/v1alpha1/opsrequest_types.go | 6 +- apis/apps/v1alpha1/zz_generated.deepcopy.go | 204 +- .../v1alpha1/actionset_types.go | 249 ++ apis/dataprotection/v1alpha1/backup_types.go | 359 +-- .../v1alpha1/backup_types_test.go | 170 -- .../v1alpha1/backuppolicy_types.go | 394 +-- .../v1alpha1/backuppolicy_types_test.go | 83 - .../v1alpha1/backupschedule_types.go | 159 ++ .../v1alpha1/backuptool_types.go | 161 -- apis/dataprotection/v1alpha1/restore_types.go | 438 +++ .../v1alpha1/restorejob_types.go | 102 - apis/dataprotection/v1alpha1/types.go | 248 +- .../v1alpha1/zz_generated.deepcopy.go | 1310 ++++++--- cmd/dataprotection/main.go | 35 +- cmd/manager/main.go | 4 +- ...s.kubeblocks.io_backuppolicytemplates.yaml | 692 +++-- .../bases/apps.kubeblocks.io_clusters.yaml | 25 +- .../bases/apps.kubeblocks.io_opsrequests.yaml | 8 +- ...taprotection.kubeblocks.io_actionsets.yaml | 554 ++++ ...otection.kubeblocks.io_backuppolicies.yaml | 926 +++--- .../dataprotection.kubeblocks.io_backups.yaml | 732 ++++- ...tection.kubeblocks.io_backupschedules.yaml | 141 + ...aprotection.kubeblocks.io_backuptools.yaml | 330 --- ...aprotection.kubeblocks.io_restorejobs.yaml | 1795 ------------ ...dataprotection.kubeblocks.io_restores.yaml | 2522 +++++++++++++++++ config/crd/kustomization.yaml | 7 +- ...jobs.yaml => cainjection_in_restores.yaml} | 2 +- ...torejobs.yaml => webhook_in_restores.yaml} | 2 +- ...=> dataprotection_estore_editor_role.yaml} | 8 +- ...> dataprotection_restore_viewer_role.yaml} | 8 +- config/rbac/role.yaml | 38 +- .../dataprotection_v1alpha1_restore.yaml | 6 + controllers/apps/class_controller_test.go | 1 + controllers/apps/cluster_controller.go | 12 +- controllers/apps/cluster_controller_test.go | 368 ++- 
controllers/apps/cluster_plan_builder_test.go | 2 +- .../apps/cluster_status_event_handler_test.go | 2 +- controllers/apps/components/component.go | 4 +- .../components/hscale_volume_populator.go | 358 +-- controllers/apps/components/utils_test.go | 2 +- controllers/apps/operations/backup.go | 35 +- .../apps/operations/switchover_test.go | 2 +- .../apps/operations/switchover_util_test.go | 4 +- .../apps/operations/util/common_util.go | 4 +- .../apps/operations/util/common_util_test.go | 4 +- controllers/apps/opsrequest_controller.go | 6 +- .../apps/opsrequest_controller_test.go | 45 +- controllers/apps/suite_test.go | 4 +- controllers/apps/systemaccount_controller.go | 2 +- controllers/apps/transform_restore.go | 15 +- controllers/apps/transform_types.go | 4 +- .../apps/transformer_backup_policy_tpl.go | 714 ++--- .../apps/transformer_cluster_deletion.go | 8 +- controllers/apps/transformer_rbac.go | 3 +- controllers/apps/utils.go | 6 + .../dataprotection/actionset_controller.go | 103 + .../actionset_controller_test.go | 61 + .../dataprotection/backup_controller.go | 2145 +++----------- .../dataprotection/backup_controller_test.go | 814 ++---- .../dataprotection/backuppolicy_controller.go | 651 +---- .../backuppolicy_controller_test.go | 534 +--- .../dataprotection/backuprepo_controller.go | 9 +- .../backuprepo_controller_test.go | 9 +- .../backupschedule_controller.go | 244 ++ .../backupschedule_controller_test.go | 282 ++ .../dataprotection/backuptool_controller.go | 103 - .../dataprotection/cronjob_controller.go | 96 - controllers/dataprotection/cue/cronjob.cue | 138 - .../dataprotection/cue/manifests_updater.cue | 61 - .../dataprotection/restore_controller.go | 389 +++ .../dataprotection/restorejob_controller.go | 350 --- .../restorejob_controller_test.go | 200 -- controllers/dataprotection/suite_test.go | 47 +- .../dataprotection/{type.go => types.go} | 35 +- controllers/dataprotection/utils.go | 303 +- .../extensions/addon_controller_test.go | 12 +- 
.../apecloud-mysql/dataprotection/backup.sh | 12 + .../dataprotection/pitr-backup.sh | 135 - .../dataprotection/pitr-restore.sh | 50 - .../apecloud-mysql/dataprotection/restore.sh | 12 + .../apecloud-mysql/templates/actionset.yaml | 65 + .../templates/backuppolicytemplate.yaml | 63 +- .../backuppolicytemplateforhscale.yaml | 36 +- .../templates/backuptool-pitr.yaml | 63 - .../apecloud-mysql/templates/backuptool.yaml | 55 - .../templates/backuptoolforhscale.yaml | 46 - deploy/helm/config/rbac/role.yaml | 38 +- ...s.kubeblocks.io_backuppolicytemplates.yaml | 692 +++-- .../crds/apps.kubeblocks.io_clusters.yaml | 25 +- .../crds/apps.kubeblocks.io_opsrequests.yaml | 8 +- ...taprotection.kubeblocks.io_actionsets.yaml | 554 ++++ ...otection.kubeblocks.io_backuppolicies.yaml | 926 +++--- .../dataprotection.kubeblocks.io_backups.yaml | 732 ++++- ...tection.kubeblocks.io_backupschedules.yaml | 141 + ...aprotection.kubeblocks.io_backuptools.yaml | 330 --- ...aprotection.kubeblocks.io_restorejobs.yaml | 1795 ------------ ...dataprotection.kubeblocks.io_restores.yaml | 2522 +++++++++++++++++ ...> dataprotection_restore_editor_role.yaml} | 8 +- ...> dataprotection_restore_viewer_role.yaml} | 8 +- deploy/helm/values.yaml | 412 +-- .../templates/backuppolicytemplate.yaml | 22 +- .../dataprotection/backup-info-collector.sh | 6 +- .../mongodb/dataprotection/datafile-backup.sh | 10 + .../dataprotection/datafile-restore.sh | 12 + .../dataprotection/mongodump-backup.sh | 12 + .../dataprotection/mongodump-restore.sh | 6 + deploy/mongodb/dataprotection/pitr-backup.sh | 18 +- deploy/mongodb/scripts/replicaset-setup.tpl | 1 + .../mongodb/templates/actionset-datafile.yaml | 58 + deploy/mongodb/templates/actionset-dump.yaml | 39 + .../templates/backuppolicytemplate.yaml | 67 +- deploy/mongodb/templates/backuptool.yaml | 52 - .../templates/backuptool_mongodump.yaml | 41 - deploy/mongodb/templates/backuptool_pitr.yaml | 36 - .../mongodb/templates/clusterdefinition.yaml | 2 +- 
deploy/mongodb/values.yaml | 2 + .../templates/actionset-xtrabackup.yaml | 49 + .../templates/backuppolicytemplate.yaml | 32 +- deploy/oracle-mysql/templates/backuptool.yaml | 36 - .../templates/clusterdefinition.yaml | 2 +- deploy/oracle-mysql/values.yaml | 2 + .../dataprotection/backup-info-collector.sh | 6 +- .../dataprotection/fetch-wal-log.sh | 57 - .../dataprotection/pg-basebackup-backup.sh | 12 + .../dataprotection/pg-basebackup-restore.sh | 15 + .../postgresql/dataprotection/pitr-backup.sh | 126 - .../postgresql/dataprotection/pitr-restore.sh | 30 - .../templates/actionset-pgbasebackup.yaml | 38 + .../templates/backuppolicytemplate.yaml | 69 +- .../templates/backuptool-pgbasebackup.yaml | 56 - .../postgresql/templates/backuptool-pitr.yaml | 56 - .../templates/backuptool-wal-g.yaml | 76 - .../templates/clusterdefinition.yaml | 8 +- deploy/postgresql/values.yaml | 2 + deploy/qdrant/scripts/qdrant-backup.sh | 13 +- deploy/qdrant/scripts/qdrant-restore.sh | 2 +- .../qdrant/templates/actionset-datafile.yaml | 35 + .../templates/backuppolicytemplate.yaml | 34 +- deploy/qdrant/templates/backuptool.yaml | 25 - .../qdrant/templates/clusterdefinition.yaml | 2 +- deploy/qdrant/values.yaml | 4 +- deploy/redis/dataprotection/backup.sh | 19 + deploy/redis/dataprotection/restore.sh | 12 + deploy/redis/templates/backupactionset.yaml | 37 + .../redis/templates/backuppolicytemplate.yaml | 46 +- deploy/redis/templates/backuptool.yaml | 54 - deploy/redis/templates/clusterdefinition.yaml | 6 +- deploy/redis/values.yaml | 1 + .../templates/backuppolicytemplate.yaml | 22 +- docs/user_docs/cli/kbcli_backup_create.md | 2 +- docs/user_docs/cli/kbcli_cluster_backup.md | 2 +- docs/user_docs/cli/kbcli_cluster_create.md | 2 +- docs/user_docs/cli/kbcli_cluster_restore.md | 8 +- internal/cli/cluster/cluster.go | 5 +- internal/cli/cluster/cluster_test.go | 5 +- internal/cli/cmd/backup/create.go | 2 +- internal/cli/cmd/backuprepo/create.go | 12 +- internal/cli/cmd/backuprepo/describe.go | 
12 +- .../cli/cmd/builder/template/helm_helper.go | 6 +- internal/cli/cmd/cluster/config_util_test.go | 2 +- internal/cli/cmd/cluster/create.go | 140 +- internal/cli/cmd/cluster/create_test.go | 36 +- internal/cli/cmd/cluster/dataprotection.go | 325 +-- .../cli/cmd/cluster/dataprotection_test.go | 204 +- internal/cli/cmd/cluster/describe.go | 102 +- internal/cli/cmd/cluster/describe_test.go | 20 +- internal/cli/cmd/cluster/update.go | 18 +- .../cmd/kubeblocks/kubeblocks_objects_test.go | 34 +- internal/cli/cmd/kubeblocks/status.go | 2 +- internal/cli/cmd/report/report.go | 7 +- .../cli/create/template/backup_template.cue | 4 +- internal/cli/scheme/install.go | 4 +- internal/cli/testing/fake.go | 72 +- internal/cli/types/types.go | 29 +- internal/cli/util/helm/helm.go | 5 +- internal/constant/const.go | 53 +- internal/controller/builder/builder_backup.go | 4 +- .../controller/builder/builder_backup_test.go | 11 +- .../controller/component/affinity_utils.go | 2 +- .../component/affinity_utils_test.go | 6 +- internal/controller/component/component.go | 2 +- internal/controller/component/suite_test.go | 4 +- .../controller/configuration/suite_test.go | 4 +- internal/controller/factory/builder.go | 48 +- internal/controller/factory/builder_test.go | 16 - internal/controller/factory/suite_test.go | 4 +- internal/controller/plan/restore.go | 952 ++----- internal/controller/plan/restore_test.go | 314 +- internal/controller/plan/suite_test.go | 4 +- internal/controller/rsm/utils.go | 2 +- internal/controllerutil/controller_common.go | 4 +- internal/controllerutil/errors.go | 54 +- internal/controllerutil/errors_test.go | 56 - internal/controllerutil/pod_utils.go | 14 +- internal/dataprotection/action/action.go | 51 + .../dataprotection/action/action_create_vs.go | 261 ++ .../action/action_create_vs_test.go | 102 + internal/dataprotection/action/action_exec.go | 110 + .../dataprotection/action/action_exec_test.go | 127 + internal/dataprotection/action/action_job.go | 137 + 
.../dataprotection/action/action_job_test.go | 120 + .../dataprotection/action/builder_status.go | 105 + internal/dataprotection/action/suite_test.go | 152 + internal/dataprotection/action/types.go | 27 + internal/dataprotection/action/utils.go | 35 + internal/dataprotection/backup/deleter.go | 252 ++ .../dataprotection/backup/deleter_test.go | 160 ++ internal/dataprotection/backup/request.go | 410 +++ .../dataprotection/backup/request_test.go | 20 + internal/dataprotection/backup/scheduler.go | 369 +++ .../dataprotection/backup/scheduler_test.go | 20 + internal/dataprotection/backup/suite_test.go | 141 + internal/dataprotection/backup/types.go | 31 + internal/dataprotection/backup/utils.go | 235 ++ internal/dataprotection/builder/builder.go | 41 + .../dataprotection/builder/builder_test.go | 36 + internal/dataprotection/builder/suite_test.go | 47 + internal/dataprotection/errors/errors.go | 85 + internal/dataprotection/errors/errors_test.go | 229 ++ internal/dataprotection/restore/builder.go | 294 ++ internal/dataprotection/restore/manager.go | 513 ++++ internal/dataprotection/restore/types.go | 60 + internal/dataprotection/restore/utils.go | 218 ++ internal/dataprotection/types/constant.go | 99 + internal/dataprotection/types/types.go | 25 + .../dataprotection/utils/boolptr/boolptr.go | 42 + .../utils/boolptr/boolptr_test.go | 39 + internal/dataprotection/utils/envvar.go | 63 + internal/dataprotection/utils/utils.go | 135 + internal/generics/type.go | 15 +- internal/testutil/apps/backup_factory.go | 70 - .../testutil/apps/backuppolicy_factory.go | 236 -- .../apps/backuppolicytemplate_factory.go | 213 +- internal/testutil/apps/base_factory.go | 16 +- .../apps/cluster_consensus_test_util.go | 11 +- internal/testutil/apps/cluster_factory.go | 83 +- .../apps/cluster_replication_test_util.go | 2 +- .../apps/cluster_stateless_test_util.go | 2 +- internal/testutil/apps/clusterdef_factory.go | 14 +- .../testutil/apps/clusterversion_factory.go | 16 +- 
internal/testutil/apps/common_util.go | 2 +- .../apps/componentclassdefinition_factory.go | 6 +- .../componentresourceconstraint_factory.go | 8 +- internal/testutil/apps/constant.go | 4 +- internal/testutil/apps/deployment_factoy.go | 10 +- internal/testutil/apps/pod_factory.go | 8 +- internal/testutil/apps/pvc_factoy.go | 8 +- internal/testutil/apps/restorejob_factory.go | 86 - internal/testutil/apps/rsm_factoy.go | 10 +- .../apps/servicedescriptor_factory.go | 12 +- internal/testutil/apps/statefulset_factoy.go | 10 +- .../testutil/dataprotection/backup_factory.go | 68 + .../testutil/dataprotection/backup_utils.go | 223 ++ .../dataprotection/backuppolicy_factory.go | 106 + .../backuprepo_factory.go | 31 +- .../dataprotection/backupschedule_factory.go | 56 + internal/testutil/dataprotection/constant.go | 50 + internal/testutil/dataprotection/k8s_utils.go | 33 + .../dataprotection/restore_factory.go | 86 + internal/testutil/dataprotection/types.go | 32 + internal/testutil/dataprotection/utils.go | 60 + .../testutil/dataprotection/vs_factory.go | 44 + .../dataprotection/v1alpha1/actionset.go | 184 ++ .../dataprotection/v1alpha1/backupschedule.go | 195 ++ .../dataprotection/v1alpha1/backuptool.go | 184 -- .../v1alpha1/dataprotection_client.go | 17 +- .../v1alpha1/fake/fake_actionset.go | 132 + .../v1alpha1/fake/fake_backupschedule.go | 141 + .../v1alpha1/fake/fake_backuptool.go | 132 - .../fake/fake_dataprotection_client.go | 12 +- .../v1alpha1/fake/fake_restore.go | 141 + .../v1alpha1/fake/fake_restorejob.go | 141 - .../v1alpha1/generated_expansion.go | 6 +- .../v1alpha1/{restorejob.go => restore.go} | 106 +- .../v1alpha1/{backuptool.go => actionset.go} | 38 +- .../dataprotection/v1alpha1/backupschedule.go | 90 + .../dataprotection/v1alpha1/interface.go | 27 +- .../v1alpha1/{restorejob.go => restore.go} | 38 +- .../informers/externalversions/generic.go | 10 +- .../v1alpha1/{backuptool.go => actionset.go} | 38 +- .../dataprotection/v1alpha1/backupschedule.go | 99 + 
.../v1alpha1/expansion_generated.go | 26 +- .../dataprotection/v1alpha1/restore.go | 99 + .../dataprotection/v1alpha1/restorejob.go | 99 - test/integration/backup_mysql_test.go | 31 +- test/integration/controller_suite_test.go | 34 +- test/testdata/backup/actionset.yaml | 36 + test/testdata/backup/backuptool.yaml | 35 - 291 files changed, 22877 insertions(+), 18481 deletions(-) create mode 100644 apis/dataprotection/v1alpha1/actionset_types.go delete mode 100644 apis/dataprotection/v1alpha1/backup_types_test.go delete mode 100644 apis/dataprotection/v1alpha1/backuppolicy_types_test.go create mode 100644 apis/dataprotection/v1alpha1/backupschedule_types.go delete mode 100644 apis/dataprotection/v1alpha1/backuptool_types.go create mode 100644 apis/dataprotection/v1alpha1/restore_types.go delete mode 100644 apis/dataprotection/v1alpha1/restorejob_types.go create mode 100644 config/crd/bases/dataprotection.kubeblocks.io_actionsets.yaml create mode 100644 config/crd/bases/dataprotection.kubeblocks.io_backupschedules.yaml delete mode 100644 config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml delete mode 100644 config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml create mode 100644 config/crd/bases/dataprotection.kubeblocks.io_restores.yaml rename config/crd/patches/{cainjection_in_dataprotection_restorejobs.yaml => cainjection_in_restores.yaml} (83%) rename config/crd/patches/{webhook_in_dataprotection_restorejobs.yaml => webhook_in_restores.yaml} (88%) rename config/rbac/{dataprotection_restorejob_editor_role.yaml => dataprotection_estore_editor_role.yaml} (70%) rename config/rbac/{dataprotection_restorejob_viewer_role.yaml => dataprotection_restore_viewer_role.yaml} (67%) create mode 100644 config/samples/dataprotection_v1alpha1_restore.yaml create mode 100644 controllers/dataprotection/actionset_controller.go create mode 100644 controllers/dataprotection/actionset_controller_test.go create mode 100644 
controllers/dataprotection/backupschedule_controller.go create mode 100644 controllers/dataprotection/backupschedule_controller_test.go delete mode 100644 controllers/dataprotection/backuptool_controller.go delete mode 100644 controllers/dataprotection/cronjob_controller.go delete mode 100644 controllers/dataprotection/cue/cronjob.cue delete mode 100644 controllers/dataprotection/cue/manifests_updater.cue create mode 100644 controllers/dataprotection/restore_controller.go delete mode 100644 controllers/dataprotection/restorejob_controller.go delete mode 100644 controllers/dataprotection/restorejob_controller_test.go rename controllers/dataprotection/{type.go => types.go} (75%) create mode 100644 deploy/apecloud-mysql/dataprotection/backup.sh delete mode 100644 deploy/apecloud-mysql/dataprotection/pitr-backup.sh delete mode 100644 deploy/apecloud-mysql/dataprotection/pitr-restore.sh create mode 100644 deploy/apecloud-mysql/dataprotection/restore.sh create mode 100644 deploy/apecloud-mysql/templates/actionset.yaml delete mode 100644 deploy/apecloud-mysql/templates/backuptool-pitr.yaml delete mode 100644 deploy/apecloud-mysql/templates/backuptool.yaml delete mode 100644 deploy/apecloud-mysql/templates/backuptoolforhscale.yaml create mode 100644 deploy/helm/crds/dataprotection.kubeblocks.io_actionsets.yaml create mode 100644 deploy/helm/crds/dataprotection.kubeblocks.io_backupschedules.yaml delete mode 100644 deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml delete mode 100644 deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml create mode 100644 deploy/helm/crds/dataprotection.kubeblocks.io_restores.yaml rename deploy/helm/templates/rbac/{dataprotection_restorejob_editor_role.yaml => dataprotection_restore_editor_role.yaml} (69%) rename deploy/helm/templates/rbac/{dataprotection_restorejob_viewer_role.yaml => dataprotection_restore_viewer_role.yaml} (66%) create mode 100644 deploy/mongodb/dataprotection/datafile-backup.sh create mode 100644 
deploy/mongodb/dataprotection/datafile-restore.sh create mode 100644 deploy/mongodb/dataprotection/mongodump-backup.sh create mode 100644 deploy/mongodb/dataprotection/mongodump-restore.sh create mode 100644 deploy/mongodb/templates/actionset-datafile.yaml create mode 100644 deploy/mongodb/templates/actionset-dump.yaml delete mode 100644 deploy/mongodb/templates/backuptool.yaml delete mode 100644 deploy/mongodb/templates/backuptool_mongodump.yaml delete mode 100644 deploy/mongodb/templates/backuptool_pitr.yaml create mode 100644 deploy/oracle-mysql/templates/actionset-xtrabackup.yaml delete mode 100644 deploy/oracle-mysql/templates/backuptool.yaml delete mode 100644 deploy/postgresql/dataprotection/fetch-wal-log.sh create mode 100644 deploy/postgresql/dataprotection/pg-basebackup-backup.sh create mode 100644 deploy/postgresql/dataprotection/pg-basebackup-restore.sh delete mode 100644 deploy/postgresql/dataprotection/pitr-backup.sh delete mode 100644 deploy/postgresql/dataprotection/pitr-restore.sh create mode 100644 deploy/postgresql/templates/actionset-pgbasebackup.yaml delete mode 100644 deploy/postgresql/templates/backuptool-pgbasebackup.yaml delete mode 100644 deploy/postgresql/templates/backuptool-pitr.yaml delete mode 100644 deploy/postgresql/templates/backuptool-wal-g.yaml create mode 100644 deploy/qdrant/templates/actionset-datafile.yaml delete mode 100644 deploy/qdrant/templates/backuptool.yaml create mode 100644 deploy/redis/dataprotection/backup.sh create mode 100644 deploy/redis/dataprotection/restore.sh create mode 100644 deploy/redis/templates/backupactionset.yaml delete mode 100644 deploy/redis/templates/backuptool.yaml create mode 100644 internal/dataprotection/action/action.go create mode 100644 internal/dataprotection/action/action_create_vs.go create mode 100644 internal/dataprotection/action/action_create_vs_test.go create mode 100644 internal/dataprotection/action/action_exec.go create mode 100644 
internal/dataprotection/action/action_exec_test.go create mode 100644 internal/dataprotection/action/action_job.go create mode 100644 internal/dataprotection/action/action_job_test.go create mode 100644 internal/dataprotection/action/builder_status.go create mode 100644 internal/dataprotection/action/suite_test.go create mode 100644 internal/dataprotection/action/types.go create mode 100644 internal/dataprotection/action/utils.go create mode 100644 internal/dataprotection/backup/deleter.go create mode 100644 internal/dataprotection/backup/deleter_test.go create mode 100644 internal/dataprotection/backup/request.go create mode 100644 internal/dataprotection/backup/request_test.go create mode 100644 internal/dataprotection/backup/scheduler.go create mode 100644 internal/dataprotection/backup/scheduler_test.go create mode 100644 internal/dataprotection/backup/suite_test.go create mode 100644 internal/dataprotection/backup/types.go create mode 100644 internal/dataprotection/backup/utils.go create mode 100644 internal/dataprotection/builder/builder.go create mode 100644 internal/dataprotection/builder/builder_test.go create mode 100644 internal/dataprotection/builder/suite_test.go create mode 100644 internal/dataprotection/errors/errors.go create mode 100644 internal/dataprotection/errors/errors_test.go create mode 100644 internal/dataprotection/restore/builder.go create mode 100644 internal/dataprotection/restore/manager.go create mode 100644 internal/dataprotection/restore/types.go create mode 100644 internal/dataprotection/restore/utils.go create mode 100644 internal/dataprotection/types/constant.go create mode 100644 internal/dataprotection/types/types.go create mode 100644 internal/dataprotection/utils/boolptr/boolptr.go create mode 100644 internal/dataprotection/utils/boolptr/boolptr_test.go create mode 100644 internal/dataprotection/utils/envvar.go create mode 100644 internal/dataprotection/utils/utils.go delete mode 100644 
internal/testutil/apps/backup_factory.go delete mode 100644 internal/testutil/apps/backuppolicy_factory.go delete mode 100644 internal/testutil/apps/restorejob_factory.go create mode 100644 internal/testutil/dataprotection/backup_factory.go create mode 100644 internal/testutil/dataprotection/backup_utils.go create mode 100644 internal/testutil/dataprotection/backuppolicy_factory.go rename internal/testutil/{apps => dataprotection}/backuprepo_factory.go (69%) create mode 100644 internal/testutil/dataprotection/backupschedule_factory.go create mode 100644 internal/testutil/dataprotection/constant.go create mode 100644 internal/testutil/dataprotection/k8s_utils.go create mode 100644 internal/testutil/dataprotection/restore_factory.go create mode 100644 internal/testutil/dataprotection/types.go create mode 100644 internal/testutil/dataprotection/utils.go create mode 100644 internal/testutil/dataprotection/vs_factory.go create mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/actionset.go create mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backupschedule.go delete mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backuptool.go create mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_actionset.go create mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backupschedule.go delete mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go create mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restore.go delete mode 100644 pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go rename pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/{restorejob.go => restore.go} (52%) rename pkg/client/informers/externalversions/dataprotection/v1alpha1/{backuptool.go => actionset.go} (58%) create mode 100644 
pkg/client/informers/externalversions/dataprotection/v1alpha1/backupschedule.go rename pkg/client/informers/externalversions/dataprotection/v1alpha1/{restorejob.go => restore.go} (57%) rename pkg/client/listers/dataprotection/v1alpha1/{backuptool.go => actionset.go} (53%) create mode 100644 pkg/client/listers/dataprotection/v1alpha1/backupschedule.go create mode 100644 pkg/client/listers/dataprotection/v1alpha1/restore.go delete mode 100644 pkg/client/listers/dataprotection/v1alpha1/restorejob.go create mode 100644 test/testdata/backup/actionset.yaml delete mode 100644 test/testdata/backup/backuptool.yaml diff --git a/PROJECT b/PROJECT index 81732375d10..807651ecd19 100644 --- a/PROJECT +++ b/PROJECT @@ -39,8 +39,8 @@ resources: namespaced: true controller: true domain: kubeblocks.io - group: apps - kind: BackupTool + group: dataprotection + kind: ActionSet path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 version: v1alpha1 - api: @@ -66,18 +66,9 @@ resources: namespaced: true controller: true domain: kubeblocks.io - group: dataprotection - kind: RestoreJob - path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 - version: v1alpha1 -- api: - crdVersion: v1 - namespaced: true - controller: true - domain: kubeblocks.io - group: dataprotection + group: apps kind: BackupPolicyTemplate - path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 + path: github.com/apecloud/kubeblocks/apis/apps/v1alpha1 version: v1alpha1 - api: crdVersion: v1 @@ -168,6 +159,14 @@ resources: kind: BackupRepo path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + controller: true + domain: kubeblocks.io + group: dataprotection + kind: BackupSchedule + path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 + version: v1alpha1 - api: crdVersion: v1 namespaced: true @@ -190,4 +189,13 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true 
+ controller: true + domain: kubeblocks.io + group: dataprotection + kind: Restore + path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 + version: v1alpha1 version: "3" diff --git a/apis/apps/v1alpha1/backuppolicytemplate_types.go b/apis/apps/v1alpha1/backuppolicytemplate_types.go index d7e61f71e7e..34431884905 100644 --- a/apis/apps/v1alpha1/backuppolicytemplate_types.go +++ b/apis/apps/v1alpha1/backuppolicytemplate_types.go @@ -18,6 +18,8 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" ) // BackupPolicyTemplateSpec defines the desired state of BackupPolicyTemplate @@ -54,101 +56,46 @@ type BackupPolicy struct { // +kubebuilder:validation:Pattern:=`^[a-z]([a-z0-9\-]*[a-z0-9])?$` ComponentDefRef string `json:"componentDefRef"` - // retention describe how long the Backup should be retained. if not set, will be retained forever. - // +optional - Retention *RetentionSpec `json:"retention,omitempty"` - - // schedule policy for backup. - // +optional - Schedule Schedule `json:"schedule,omitempty"` - - // the policy for snapshot backup. - // +optional - Snapshot *SnapshotPolicy `json:"snapshot,omitempty"` - - // the policy for datafile backup. - // +optional - Datafile *CommonBackupPolicy `json:"datafile,omitempty"` - - // the policy for logfile backup. - // +optional - Logfile *CommonBackupPolicy `json:"logfile,omitempty"` -} - -type RetentionSpec struct { - // ttl is a time string ending with the 'd'|'D'|'h'|'H' character to describe how long - // the Backup should be retained. if not set, will be retained forever. - // +kubebuilder:validation:Pattern:=`^\d+[d|D|h|H]$` - // +optional - TTL *string `json:"ttl,omitempty"` -} - -type Schedule struct { - // startingDeadlineMinutes defines the deadline in minutes for starting the backup job - // if it misses scheduled time for any reason. 
+ // retentionPeriod determines a duration up to which the backup should be kept. + // controller will remove all backups that are older than the RetentionPeriod. + // For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. + // Sample duration format: + // - years: 2y + // - months: 6mo + // - days: 30d + // - hours: 12h + // - minutes: 30m + // You can also combine the above durations. For example: 30d12h30m // +optional - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=1440 - StartingDeadlineMinutes *int64 `json:"startingDeadlineMinutes,omitempty"` + // +kubebuilder:default="7d" + RetentionPeriod dpv1alpha1.RetentionPeriod `json:"retentionPeriod,omitempty"` - // schedule policy for snapshot backup. - // +optional - Snapshot *SchedulePolicy `json:"snapshot,omitempty"` - - // schedule policy for datafile backup. + // target instance for backup. // +optional - Datafile *SchedulePolicy `json:"datafile,omitempty"` + Target TargetInstance `json:"target"` - // schedule policy for logfile backup. + // schedule policy for backup. // +optional - Logfile *SchedulePolicy `json:"logfile,omitempty"` -} + Schedules []SchedulePolicy `json:"schedules,omitempty"` -type SchedulePolicy struct { - // the cron expression for schedule, the timezone is in UTC. see https://en.wikipedia.org/wiki/Cron. + // backupMethods defines the backup methods. // +kubebuilder:validation:Required - CronExpression string `json:"cronExpression"` - - // enable or disable the schedule. - // +kubebuilder:validation:Required - Enable bool `json:"enable"` + BackupMethods []dpv1alpha1.BackupMethod `json:"backupMethods"` } -type SnapshotPolicy struct { - BasePolicy `json:",inline"` - - // execute hook commands for backup. +type SchedulePolicy struct { + // enabled specifies whether the backup schedule is enabled or not. 
// +optional - Hooks *BackupPolicyHook `json:"hooks,omitempty"` -} - -type CommonBackupPolicy struct { - BasePolicy `json:",inline"` + Enabled *bool `json:"enabled,omitempty"` - // which backup tool to perform database backup, only support one tool. + // backupMethod specifies the backup method name that is defined in backupPolicy. // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - BackupToolName string `json:"backupToolName,omitempty"` -} - -type BasePolicy struct { - // target instance for backup. - // +optional - Target TargetInstance `json:"target"` - - // the number of automatic backups to retain. Value must be non-negative integer. - // 0 means NO limit on the number of backups. - // +kubebuilder:default=7 - // +optional - BackupsHistoryLimit int32 `json:"backupsHistoryLimit,omitempty"` + BackupMethod string `json:"backupMethod"` - // count of backup stop retries on fail. - // +optional - OnFailAttempted int32 `json:"onFailAttempted,omitempty"` - - // define how to update metadata for backup status. - // +optional - BackupStatusUpdates []BackupStatusUpdate `json:"backupStatusUpdates,omitempty"` + // the cron expression for schedule, the timezone is in UTC. + // see https://en.wikipedia.org/wiki/Cron. + // +kubebuilder:validation:Required + CronExpression string `json:"cronExpression"` } type TargetInstance struct { @@ -185,51 +132,13 @@ type ConnectionCredentialKey struct { // if not set, the default key is "username". // +optional UsernameKey *string `json:"usernameKey,omitempty"` -} - -// BackupPolicyHook defines for the database execute commands before and after backup. 
-type BackupPolicyHook struct { - // pre backup to perform commands - // +optional - PreCommands []string `json:"preCommands,omitempty"` - // post backup to perform commands - // +optional - PostCommands []string `json:"postCommands,omitempty"` - - // exec command with image - // +optional - Image string `json:"image,omitempty"` - - // which container can exec command - // +optional - ContainerName string `json:"containerName,omitempty"` -} - -type BackupStatusUpdate struct { - // specify the json path of backup object for patch. - // example: manifests.backupLog -- means patch the backup json path of status.manifests.backupLog. - // +optional - Path string `json:"path,omitempty"` + // hostKey specifies the map key of the host in the connection credential secret. + HostKey *string `json:"hostKey,omitempty"` - // which container name that kubectl can execute. - // +optional - ContainerName string `json:"containerName,omitempty"` - - // the shell Script commands to collect backup status metadata. - // The script must exist in the container of ContainerName and the output format must be set to JSON. - // Note that outputting to stderr may cause the result format to not be in JSON. - // +optional - Script string `json:"script,omitempty"` - - // useTargetPodServiceAccount defines whether this job requires the service account of the backup target pod. - // if true, will use the service account of the backup target pod. otherwise, will use the system service account. - // +optional - UseTargetPodServiceAccount bool `json:"useTargetPodServiceAccount,omitempty"` - - // when to update the backup status, pre: before backup, post: after backup - // +kubebuilder:validation:Required - UpdateStage BackupStatusUpdateStage `json:"updateStage"` + // portKey specifies the map key of the port in the connection credential secret. 
+ // +kubebuilder:default=port + PortKey *string `json:"portKey,omitempty"` } // BackupPolicyTemplateStatus defines the observed state of BackupPolicyTemplate diff --git a/apis/apps/v1alpha1/cluster_types.go b/apis/apps/v1alpha1/cluster_types.go index 39a1f728440..f6ff22c0b8b 100644 --- a/apis/apps/v1alpha1/cluster_types.go +++ b/apis/apps/v1alpha1/cluster_types.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -116,18 +116,23 @@ type ClusterBackup struct { // +optional Enabled *bool `json:"enabled,omitempty"` - // retentionPeriod is a time string ending with the 'd'|'D'|'h'|'H' character to describe how long - // the Backup should be retained. if not set, will be retained forever. - // +kubebuilder:validation:Pattern:=`^\d+[d|D|h|H]$` - // +kubebuilder:default="1d" + // retentionPeriod determines a duration up to which the backup should be kept. + // controller will remove all backups that are older than the RetentionPeriod. + // For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. + // Sample duration format: + // - years: 2y + // - months: 6mo + // - days: 30d + // - hours: 12h + // - minutes: 30m + // You can also combine the above durations. For example: 30d12h30m + // +kubebuilder:default="7d" // +optional - RetentionPeriod *string `json:"retentionPeriod,omitempty"` + RetentionPeriod dpv1alpha1.RetentionPeriod `json:"retentionPeriod,omitempty"` - // backup method, support: snapshot, backupTool. 
- // +kubebuilder:validation:Enum=snapshot;backupTool - // +kubebuilder:validation:Required - // +kubebuilder:default=snapshot - Method dataprotectionv1alpha1.BackupMethod `json:"method"` + // backup method name to use, that is defined in backupPolicy. + // +optional + Method string `json:"method"` // the cron expression for schedule, the timezone is in UTC. see https://en.wikipedia.org/wiki/Cron. // +optional @@ -135,9 +140,9 @@ type ClusterBackup struct { // startingDeadlineMinutes defines the deadline in minutes for starting the backup job // if it misses scheduled time for any reason. - // +optional // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=1440 + // +optional StartingDeadlineMinutes *int64 `json:"startingDeadlineMinutes,omitempty"` // repoName is the name of the backupRepo, if not set, will use the default backupRepo. @@ -350,12 +355,12 @@ type ClusterComponentStatus struct { // consensusSetStatus specifies the mapping of role and pod name. // +optional - //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." + // +kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." ConsensusSetStatus *ConsensusSetStatus `json:"consensusSetStatus,omitempty"` // replicationSetStatus specifies the mapping of role and pod name. // +optional - //+kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." + // +kubebuilder:deprecatedversion:warning="This field is deprecated from KB 0.7.0, use MembersStatus instead." ReplicationSetStatus *ReplicationSetStatus `json:"replicationSetStatus,omitempty"` // members' status. 
diff --git a/apis/apps/v1alpha1/opsrequest_types.go b/apis/apps/v1alpha1/opsrequest_types.go index 91b8632a4fd..20af28d8e59 100644 --- a/apis/apps/v1alpha1/opsrequest_types.go +++ b/apis/apps/v1alpha1/opsrequest_types.go @@ -372,11 +372,9 @@ type BackupSpec struct { // +optional BackupPolicyName string `json:"backupPolicyName"` - // Backup Type. datafile or logfile or snapshot. If not set, datafile is the default type. - // +kubebuilder:default=datafile - // +kubeBuilder:validation:Enum={datafile,logfile,snapshot} + // Backup method name that is defined in backupPolicy. // +optional - BackupType string `json:"backupType"` + BackupMethod string `json:"backupMethod"` // if backupType is incremental, parentBackupName is required. // +optional diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 04a4a3d6af3..fa5956b72f4 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,7 @@ along with this program. If not, see . package v1alpha1 import ( + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -64,26 +65,20 @@ func (in *Affinity) DeepCopy() *Affinity { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupPolicy) DeepCopyInto(out *BackupPolicy) { *out = *in - if in.Retention != nil { - in, out := &in.Retention, &out.Retention - *out = new(RetentionSpec) - (*in).DeepCopyInto(*out) - } - in.Schedule.DeepCopyInto(&out.Schedule) - if in.Snapshot != nil { - in, out := &in.Snapshot, &out.Snapshot - *out = new(SnapshotPolicy) - (*in).DeepCopyInto(*out) - } - if in.Datafile != nil { - in, out := &in.Datafile, &out.Datafile - *out = new(CommonBackupPolicy) - (*in).DeepCopyInto(*out) + in.Target.DeepCopyInto(&out.Target) + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.Logfile != nil { - in, out := &in.Logfile, &out.Logfile - *out = new(CommonBackupPolicy) - (*in).DeepCopyInto(*out) + if in.BackupMethods != nil { + in, out := &in.BackupMethods, &out.BackupMethods + *out = make([]dataprotectionv1alpha1.BackupMethod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } @@ -97,31 +92,6 @@ func (in *BackupPolicy) DeepCopy() *BackupPolicy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupPolicyHook) DeepCopyInto(out *BackupPolicyHook) { - *out = *in - if in.PreCommands != nil { - in, out := &in.PreCommands, &out.PreCommands - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PostCommands != nil { - in, out := &in.PostCommands, &out.PostCommands - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyHook. -func (in *BackupPolicyHook) DeepCopy() *BackupPolicyHook { - if in == nil { - return nil - } - out := new(BackupPolicyHook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupPolicyTemplate) DeepCopyInto(out *BackupPolicyTemplate) { *out = *in @@ -249,42 +219,6 @@ func (in *BackupSpec) DeepCopy() *BackupSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupStatusUpdate) DeepCopyInto(out *BackupStatusUpdate) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatusUpdate. -func (in *BackupStatusUpdate) DeepCopy() *BackupStatusUpdate { - if in == nil { - return nil - } - out := new(BackupStatusUpdate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BasePolicy) DeepCopyInto(out *BasePolicy) { - *out = *in - in.Target.DeepCopyInto(&out.Target) - if in.BackupStatusUpdates != nil { - in, out := &in.BackupStatusUpdates, &out.BackupStatusUpdates - *out = make([]BackupStatusUpdate, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasePolicy. -func (in *BasePolicy) DeepCopy() *BasePolicy { - if in == nil { - return nil - } - out := new(BasePolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CPUConstraint) DeepCopyInto(out *CPUConstraint) { *out = *in @@ -372,11 +306,6 @@ func (in *ClusterBackup) DeepCopyInto(out *ClusterBackup) { *out = new(bool) **out = **in } - if in.RetentionPeriod != nil { - in, out := &in.RetentionPeriod, &out.RetentionPeriod - *out = new(string) - **out = **in - } if in.StartingDeadlineMinutes != nil { in, out := &in.StartingDeadlineMinutes, &out.StartingDeadlineMinutes *out = new(int64) @@ -1264,22 +1193,6 @@ func (in *CommandExecutorItem) DeepCopy() *CommandExecutorItem { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CommonBackupPolicy) DeepCopyInto(out *CommonBackupPolicy) { - *out = *in - in.BasePolicy.DeepCopyInto(&out.BasePolicy) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonBackupPolicy. -func (in *CommonBackupPolicy) DeepCopy() *CommonBackupPolicy { - if in == nil { - return nil - } - out := new(CommonBackupPolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentClass) DeepCopyInto(out *ComponentClass) { *out = *in @@ -2167,6 +2080,16 @@ func (in *ConnectionCredentialKey) DeepCopyInto(out *ConnectionCredentialKey) { *out = new(string) **out = **in } + if in.HostKey != nil { + in, out := &in.HostKey, &out.HostKey + *out = new(string) + **out = **in + } + if in.PortKey != nil { + in, out := &in.PortKey, &out.PortKey + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionCredentialKey. @@ -3346,65 +3269,15 @@ func (in *RestoreFromSpec) DeepCopy() *RestoreFromSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RetentionSpec) DeepCopyInto(out *RetentionSpec) { - *out = *in - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionSpec. -func (in *RetentionSpec) DeepCopy() *RetentionSpec { - if in == nil { - return nil - } - out := new(RetentionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Schedule) DeepCopyInto(out *Schedule) { +func (in *SchedulePolicy) DeepCopyInto(out *SchedulePolicy) { *out = *in - if in.StartingDeadlineMinutes != nil { - in, out := &in.StartingDeadlineMinutes, &out.StartingDeadlineMinutes - *out = new(int64) - **out = **in - } - if in.Snapshot != nil { - in, out := &in.Snapshot, &out.Snapshot - *out = new(SchedulePolicy) - **out = **in - } - if in.Datafile != nil { - in, out := &in.Datafile, &out.Datafile - *out = new(SchedulePolicy) - **out = **in - } - if in.Logfile != nil { - in, out := &in.Logfile, &out.Logfile - *out = new(SchedulePolicy) + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. -func (in *Schedule) DeepCopy() *Schedule { - if in == nil { - return nil - } - out := new(Schedule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchedulePolicy) DeepCopyInto(out *SchedulePolicy) { - *out = *in -} - // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicy. 
func (in *SchedulePolicy) DeepCopy() *SchedulePolicy { if in == nil { @@ -3747,27 +3620,6 @@ func (in *ShellTrigger) DeepCopy() *ShellTrigger { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotPolicy) DeepCopyInto(out *SnapshotPolicy) { - *out = *in - in.BasePolicy.DeepCopyInto(&out.BasePolicy) - if in.Hooks != nil { - in, out := &in.Hooks, &out.Hooks - *out = new(BackupPolicyHook) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicy. -func (in *SnapshotPolicy) DeepCopy() *SnapshotPolicy { - if in == nil { - return nil - } - out := new(SnapshotPolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in diff --git a/apis/dataprotection/v1alpha1/actionset_types.go b/apis/dataprotection/v1alpha1/actionset_types.go new file mode 100644 index 00000000000..8dd1dc9455d --- /dev/null +++ b/apis/dataprotection/v1alpha1/actionset_types.go @@ -0,0 +1,249 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ActionSetSpec defines the desired state of ActionSet +type ActionSetSpec struct { + // backupType specifies the backup type, supported values: Full, Continuous. + // Full means full backup. + // Incremental means back up data that have changed since the last backup (full or incremental). + // Differential means back up data that have changed since the last full backup. + // Continuous will back up the transaction log continuously, the PITR (Point in Time Recovery). + // can be performed based on the continuous backup and full backup. + // +kubebuilder:validation:Enum={Full,Incremental,Differential,Continuous} + // +kubebuilder:default=Full + // +kubebuilder:validation:Required + BackupType BackupType `json:"backupType"` + + // List of environment variables to set in the container. + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + + // backup specifies the backup action. + // +optional + Backup *BackupActionSpec `json:"backup,omitempty"` + + // restore specifies the restore action. 
+ // +optional + Restore *RestoreActionSpec `json:"restore,omitempty"` +} + +// ActionSetStatus defines the observed state of ActionSet +type ActionSetStatus struct { + // phase - in list of [Available,Unavailable] + // +optional + Phase Phase `json:"phase,omitempty"` + + // A human-readable message indicating details about why the ActionSet is in this phase. + // +optional + Message string `json:"message,omitempty"` + + // generation number + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// BackupType the backup type. +// +enum +// +kubebuilder:validation:Enum={Full,Incremental,Differential,Continuous} +type BackupType string + +const ( + BackupTypeFull BackupType = "Full" + BackupTypeIncremental BackupType = "Incremental" + BackupTypeDifferential BackupType = "Differential" + BackupTypeContinuous BackupType = "Continuous" +) + +type BackupActionSpec struct { + // backupData specifies the backup data action. + // +kubebuilder:validation:Required + BackupData *BackupDataActionSpec `json:"backupData,omitempty"` + + // preBackup specifies a hook that should be executed before the backup. + // +optional + PreBackup []ActionSpec `json:"preBackup,omitempty"` + + // postBackup specifies a hook that should be executed after the backup. + // +optional + PostBackup []ActionSpec `json:"postBackup,omitempty"` +} + +// BackupDataActionSpec defines how to back up data. +type BackupDataActionSpec struct { + JobActionSpec `json:",inline"` + + // syncProgress specifies whether to sync the backup progress and its interval seconds. + // +optional + SyncProgress *SyncProgress `json:"syncProgress,omitempty"` +} + +type SyncProgress struct { + // enabled specifies whether to sync the backup progress. If enabled, + // a sidecar container will be created to sync the backup progress to the + // Backup CR status. + // +optional + Enabled *bool `json:"enabled,omitempty"` + + // intervalSeconds specifies the interval seconds to sync the backup progress. 
+ // +optional + // +kubebuilder:default=60 + IntervalSeconds *int32 `json:"intervalSeconds,omitempty"` +} + +// RestoreActionSpec defines how to restore data. +type RestoreActionSpec struct { + // prepareData specifies the action to prepare data. + // +optional + PrepareData *JobActionSpec `json:"prepareData,omitempty"` + + // postReady specifies the action to execute after the data is ready. + // +optional + PostReady []ActionSpec `json:"postReady,omitempty"` +} + +// ActionSpec defines an action that should be executed. Only one of the fields may be set. +type ActionSpec struct { + // exec specifies the action should be executed by the pod exec API in a container. + // +optional + Exec *ExecActionSpec `json:"exec,omitempty"` + + // job specifies the action should be executed by a Kubernetes Job. + // +optional + Job *JobActionSpec `json:"job,omitempty"` +} + +// ExecActionSpec is an action that uses the pod exec API to execute a command in a container +// in a pod. +type ExecActionSpec struct { + // container is the container in the pod where the command should be executed. + // If not specified, the pod's first container is used. + // +optional + Container string `json:"container,omitempty"` + + // Command is the command and arguments to execute. + // +kubebuilder:validation:MinItems=1 + Command []string `json:"command"` + + // OnError specifies how should behave if it encounters an error executing this action. + // +optional + // +kubebuilder:default=Fail + OnError ActionErrorMode `json:"onError,omitempty"` + + // Timeout defines the maximum amount of time should wait for the hook to complete before + // considering the execution a failure. + // +optional + Timeout metav1.Duration `json:"timeout,omitempty"` +} + +// JobActionSpec is an action that creates a Kubernetes Job to execute a command. +type JobActionSpec struct { + // image specifies the image of backup container. 
+ // +kubebuilder:validation:Required + Image string `json:"image"` + + // runOnTargetPodNode specifies whether to run the job workload on the + // target pod node. If backup container should mount the target pod's + // volume, this field should be set to true. + // +optional + // +kubebuilder:default=false + RunOnTargetPodNode *bool `json:"runOnTargetPodNode,omitempty"` + + // command specifies the commands to back up the volume data. + // +kubebuilder:validation:Required + Command []string `json:"command"` + + // OnError specifies how should behave if it encounters an error executing + // this action. + // +optional + // +kubebuilder:default=Fail + OnError ActionErrorMode `json:"onError,omitempty"` +} + +// ActionErrorMode defines how should treat an error from an action. +// +kubebuilder:validation:Enum=Continue;Fail +type ActionErrorMode string + +const ( + // ActionErrorModeContinue means that an error from an action is acceptable. + ActionErrorModeContinue ActionErrorMode = "Continue" + + // ActionErrorModeFail means that an error from an action is problematic. 
+ ActionErrorModeFail ActionErrorMode = "Fail" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories={kubeblocks},scope=Cluster,shortName=as +// +kubebuilder:printcolumn:name="BACKUP-TYPE",type="string",JSONPath=".spec.backupType" +// +kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// ActionSet is the Schema for the actionsets API +type ActionSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ActionSetSpec `json:"spec,omitempty"` + Status ActionSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ActionSetList contains a list of ActionSet +type ActionSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ActionSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ActionSet{}, &ActionSetList{}) +} + +func (r *ActionSet) HasPrepareDataStage() bool { + if r == nil || r.Spec.Restore == nil { + return false + } + return r.Spec.Restore.PrepareData != nil +} + +func (r *ActionSet) HasPostReadyStage() bool { + if r == nil || r.Spec.Restore == nil { + return false + } + return len(r.Spec.Restore.PostReady) > 0 +} diff --git a/apis/dataprotection/v1alpha1/backup_types.go b/apis/dataprotection/v1alpha1/backup_types.go index b2218617b26..1698c10d0f0 100644 --- a/apis/dataprotection/v1alpha1/backup_types.go +++ b/apis/dataprotection/v1alpha1/backup_types.go @@ -17,175 +17,278 @@ limitations under the License. package v1alpha1 import ( - "fmt" - "sort" - - "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // BackupSpec defines the desired state of Backup. 
type BackupSpec struct { - // Which backupPolicy is applied to perform this backup + // Which backupPolicy is applied to perform this backup. // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` BackupPolicyName string `json:"backupPolicyName"` - // Backup Type. datafile or logfile or snapshot. If not set, datafile is the default type. - // +kubebuilder:default=datafile - BackupType BackupType `json:"backupType"` + // backupMethod specifies the backup method name that is defined in backupPolicy. + // +kubebuilder:validation:Required + BackupMethod string `json:"backupMethod"` + + // deletionPolicy determines whether the backup contents stored in backup repository + // should be deleted when the backup custom resource is deleted. + // Supported values are "Retain" and "Delete". + // "Retain" means that the backup content and its physical snapshot on backup repository are kept. + // "Delete" means that the backup content and its physical snapshot on backup repository are deleted. + // +kubebuilder:validation:Enum=Delete;Retain + // +kubebuilder:validation:Required + // +kubebuilder:default=Delete + DeletionPolicy BackupDeletionPolicy `json:"deletionPolicy,omitempty"` + + // retentionPeriod determines a duration up to which the backup should be kept. + // controller will remove all backups that are older than the RetentionPeriod. + // For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. + // Sample duration format: + // - years: 2y + // - months: 6mo + // - days: 30d + // - hours: 12h + // - minutes: 30m + // You can also combine the above durations. For example: 30d12h30m + // +kubebuilder:default="7d" + // +optional + RetentionPeriod RetentionPeriod `json:"retentionPeriod,omitempty"` - // if backupType is incremental, parentBackupName is required. + // parentBackupName determines the parent backup name for incremental or + // differential backup. 
// +optional ParentBackupName string `json:"parentBackupName,omitempty"` } // BackupStatus defines the observed state of Backup. type BackupStatus struct { + // formatVersion is the backup format version, including major, minor and patch version. // +optional - Phase BackupPhase `json:"phase,omitempty"` + FormatVersion string `json:"formatVersion,omitempty"` - // Records parentBackupName if backupType is incremental. + // phase is the current state of the Backup. // +optional - ParentBackupName string `json:"parentBackupName,omitempty"` + Phase BackupPhase `json:"phase,omitempty"` - // The date and time when the Backup is eligible for garbage collection. - // 'null' means the Backup is NOT be cleaned except delete manual. + // expiration is when this backup is eligible for garbage collection. + // 'null' means the Backup will NOT be cleaned except delete manual. // +optional Expiration *metav1.Time `json:"expiration,omitempty"` - // Date/time when the backup started being processed. + // startTimestamp records the time a backup was started. + // The server's time is used for StartTimestamp. // +optional StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` - // Date/time when the backup finished being processed. + // completionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // The server's time is used for CompletionTimestamp. // +optional CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` // The duration time of backup execution. - // When converted to a string, the form is "1h2m0.5s". + // When converted to a string, the format is "1h2m0.5s". // +optional Duration *metav1.Duration `json:"duration,omitempty"` - // Backup total size. - // A string with capacity units in the form of "1Gi", "1Mi", "1Ki". + // totalSize is the total size of backed up data size. + // A string with capacity units in the format of "1Gi", "1Mi", "1Ki". 
// +optional TotalSize string `json:"totalSize,omitempty"` - // The reason for a backup failure. + // failureReason is an error that caused the backup to fail. // +optional FailureReason string `json:"failureReason,omitempty"` - // remoteVolume saves the backup data. + // backupRepoName is the name of the backup repository. + // +optional + BackupRepoName string `json:"backupRepoName,omitempty"` + + // path is the directory inside the backup repository where the backup data is stored. + // It is an absolute path in the backup repository. + // +optional + Path string `json:"path,omitempty"` + + // persistentVolumeClaimName is the name of the persistent volume claim that + // is used to store the backup data. // +optional PersistentVolumeClaimName string `json:"persistentVolumeClaimName,omitempty"` - // logFilePersistentVolumeClaimName saves the logfile backup data. + // timeRange records the time range of backed up data, for PITR, this is the + // time range of recoverable data. // +optional - LogFilePersistentVolumeClaimName string `json:"logFilePersistentVolumeClaimName,omitempty"` + TimeRange *BackupTimeRange `json:"timeRange,omitempty"` - // backupToolName references the backup tool name. + // target records the target information for this backup. // +optional - BackupToolName string `json:"backupToolName,omitempty"` + Target *BackupTarget `json:"target,omitempty"` - // sourceCluster records the source cluster information for this backup. - SourceCluster string `json:"sourceCluster,omitempty"` + // backupMethod records the backup method information for this backup. + // Refer to BackupMethod for more details. + // +optional + BackupMethod *BackupMethod `json:"backupMethod,omitempty"` - // availableReplicas available replicas for statefulSet which created by backup. + // actions records the actions information for this backup. 
// +optional - AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + Actions []ActionStatus `json:"actions,omitempty"` - // manifests determines the backup metadata info. + // volumeSnapshots records the volume snapshot status for the action. // +optional - Manifests *ManifestsStatus `json:"manifests,omitempty"` + VolumeSnapshots []VolumeSnapshotStatus `json:"volumeSnapshots,omitempty"` } -type ManifestsStatus struct { - // backupLog records startTime and stopTime of data logging. +// BackupTimeRange records the time range of backed up data, for PITR, this is the +// time range of recoverable data. +type BackupTimeRange struct { + // start records the start time of backup. // +optional - BackupLog *BackupLogStatus `json:"backupLog,omitempty"` + Start *metav1.Time `json:"start,omitempty"` - // target records the target cluster metadata string, which is in JSON format. + // end records the end time of backup. // +optional - Target string `json:"target,omitempty"` + End *metav1.Time `json:"end,omitempty"` +} - // snapshot records the volume snapshot metadata. - // +optional - Snapshot *BackupSnapshotStatus `json:"backupSnapshot,omitempty"` +// BackupDeletionPolicy describes a policy for end-of-life maintenance of backup content. +// +enum +// +kubebuilder:validation:Enum={Delete,Retain} +type BackupDeletionPolicy string + +const ( + BackupDeletionPolicyDelete BackupDeletionPolicy = "Delete" + BackupDeletionPolicyRetain BackupDeletionPolicy = "Retain" +) + +// BackupPhase is a string representation of the lifecycle phase of a Backup. +// +enum +// +kubebuilder:validation:Enum={New,InProgress,Running,Completed,Failed,Deleting} +type BackupPhase string + +const ( + // BackupPhaseNew means the backup has been created but not yet processed by + // the BackupController. + BackupPhaseNew BackupPhase = "New" + + // BackupPhaseRunning means the backup is currently executing. 
+ BackupPhaseRunning BackupPhase = "Running" + + // BackupPhaseCompleted means the backup has run successfully without errors. + BackupPhaseCompleted BackupPhase = "Completed" - // backupTool records information about backup files generated by the backup tool. + // BackupPhaseFailed means the backup ran but encountered an error that + // prevented it from completing successfully. + BackupPhaseFailed BackupPhase = "Failed" + + // BackupPhaseDeleting means the backup and all its associated data are being deleted. + BackupPhaseDeleting BackupPhase = "Deleting" +) + +type ActionStatus struct { + // name is the name of the action. + Name string `json:"name,omitempty"` + + // phase is the current state of the action. // +optional - BackupTool *BackupToolManifestsStatus `json:"backupTool,omitempty"` + Phase ActionPhase `json:"phase,omitempty"` - // userContext stores some loosely structured and extensible information. + // startTimestamp records the time an action was started. // +optional - UserContext map[string]string `json:"userContext,omitempty"` -} + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` -type BackupLogStatus struct { - // startTime records the start time of data logging. + // completionTimestamp records the time an action was completed. // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` - // stopTime records the stop time of data logging. + // failureReason is an error that caused the backup to fail. // +optional - StopTime *metav1.Time `json:"stopTime,omitempty"` -} + FailureReason string `json:"failureReason,omitempty"` -type BackupSnapshotStatus struct { - // volumeSnapshotName records the volumeSnapshot name. + // actionType is the type of the action. 
// +optional - VolumeSnapshotName string `json:"volumeSnapshotName,omitempty"` + ActionType ActionType `json:"actionType,omitempty"` - // volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent - // object representing an existing volume snapshot. - // This field should be set if the snapshot already exists and only needs a representation in Kubernetes. - // This field is immutable. + // availableReplicas available replicas for statefulSet action. // +optional - VolumeSnapshotContentName string `json:"volumeSnapshotContentName,omitempty"` -} + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` -type BackupToolManifestsStatus struct { - // filePath records the file path of backup. + // objectRef is the object reference for the action. // +optional - FilePath string `json:"filePath,omitempty"` + ObjectRef *corev1.ObjectReference `json:"objectRef,omitempty"` - // logFilePath records the log file path of backup. + // totalSize is the total size of backed up data size. + // A string with capacity units in the format of "1Gi", "1Mi", "1Ki". // +optional - LogFilePath string `json:"logFilePath,omitempty"` + TotalSize string `json:"totalSize,omitempty"` - // volumeName records volume name of backup data pvc. + // timeRange records the time range of backed up data, for PITR, this is the + // time range of recoverable data. // +optional - VolumeName string `json:"volumeName,omitempty"` + TimeRange *BackupTimeRange `json:"timeRange,omitempty"` - // Backup upload total size. - // A string with capacity units in the form of "1Gi", "1Mi", "1Ki". + // volumeSnapshots records the volume snapshot status for the action. // +optional - UploadTotalSize string `json:"uploadTotalSize,omitempty"` + VolumeSnapshots []VolumeSnapshotStatus `json:"volumeSnapshots,omitempty"` +} - // checksum of backup file, generated by md5 or sha1 or sha256. +type VolumeSnapshotStatus struct { + // name is the name of the volume snapshot. 
+ Name string `json:"name,omitempty"` + + // contentName is the name of the volume snapshot content. + ContentName string `json:"contentName,omitempty"` + + // volumeName is the name of the volume. // +optional - Checksum string `json:"checksum,omitempty"` + VolumeName string `json:"volumeName,omitempty"` - // backup checkpoint, for incremental backup. + // size is the size of the volume snapshot. // +optional - Checkpoint string `json:"checkpoint,omitempty"` + Size string `json:"size,omitempty"` } +type ActionPhase string + +const ( + // ActionPhaseNew means the action has been created but not yet processed by + // the BackupController. + ActionPhaseNew ActionPhase = "New" + + // ActionPhaseRunning means the action is currently executing. + ActionPhaseRunning ActionPhase = "Running" + + // ActionPhaseCompleted means the action has run successfully without errors. + ActionPhaseCompleted ActionPhase = "Completed" + + // ActionPhaseFailed means the action ran but encountered an error that + ActionPhaseFailed ActionPhase = "Failed" +) + +type ActionType string + +const ( + ActionTypeJob ActionType = "Job" + ActionTypeStatefulSet ActionType = "StatefulSet" + ActionTypeNone ActionType = "" +) + // +genclient // +k8s:openapi-gen=true // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:categories={kubeblocks},scope=Namespaced -// +kubebuilder:printcolumn:name="TYPE",type=string,JSONPath=`.spec.backupType` +// +kubebuilder:printcolumn:name="POLICY",type=string,JSONPath=`.spec.backupPolicyName` +// +kubebuilder:printcolumn:name="METHOD",type=string,JSONPath=`.spec.backupMethod` +// +kubebuilder:printcolumn:name="REPO",type=string,JSONPath=`.status.backupRepoName` // +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="SOURCE-CLUSTER",type=string,JSONPath=`.status.sourceCluster` // +kubebuilder:printcolumn:name="TOTAL-SIZE",type=string,JSONPath=`.status.totalSize` // 
+kubebuilder:printcolumn:name="DURATION",type=string,JSONPath=`.status.duration` -// +kubebuilder:printcolumn:name="CREATE-TIME",type=string,JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="CREATION-TIME",type=string,JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="COMPLETION-TIME",type=string,JSONPath=`.status.completionTimestamp` +// +kubebuilder:printcolumn:name="EXPIRATION-TIME",type=string,JSONPath=`.status.expiration` -// Backup is the Schema for the backups API (defined by User). +// Backup is the Schema for the backups API. type Backup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -207,96 +310,22 @@ func init() { SchemeBuilder.Register(&Backup{}, &BackupList{}) } -// Validate validates the BackupSpec and returns an error if invalid. -func (r *BackupSpec) Validate(backupPolicy *BackupPolicy) error { - notSupportedMessage := "backupPolicy: %s not supports %s backup in backupPolicy" - switch r.BackupType { - case BackupTypeSnapshot: - if backupPolicy.Spec.Snapshot == nil { - return fmt.Errorf(notSupportedMessage, r.BackupPolicyName, BackupTypeSnapshot) - } - case BackupTypeDataFile: - if backupPolicy.Spec.Datafile == nil { - return fmt.Errorf(notSupportedMessage, r.BackupPolicyName, BackupTypeDataFile) - } - case BackupTypeLogFile: - if backupPolicy.Spec.Logfile == nil { - return fmt.Errorf(notSupportedMessage, r.BackupPolicyName, BackupTypeLogFile) - } - } - return nil -} - -// GetStartTime gets the backup start time. the default return is status.startTime, unless status.manifests.backupLog.startTime is not nil. -func (r *BackupStatus) GetStartTime() *metav1.Time { - if r.Manifests != nil && r.Manifests.BackupLog != nil && r.Manifests.BackupLog.StartTime != nil { - return r.Manifests.BackupLog.StartTime - } - return r.StartTimestamp -} - -// GetStopTime gets the backup stop time. 
the default return is status.completionTimestamp, unless status.manifests.backupLog.stopTime is not nil. -func (r *BackupStatus) GetStopTime() *metav1.Time { - if r.Manifests != nil && r.Manifests.BackupLog != nil && r.Manifests.BackupLog.StopTime != nil { - return r.Manifests.BackupLog.StopTime +// GetStartTime gets the backup start time. Default return status.startTimestamp, +// unless status.timeRange.startTime is not nil. +func (r *Backup) GetStartTime() *metav1.Time { + s := r.Status + if s.TimeRange != nil && s.TimeRange.Start != nil { + return s.TimeRange.Start } - return r.CompletionTimestamp + return s.StartTimestamp } -// GetRecoverableTimeRange returns the recoverable time range array. -func GetRecoverableTimeRange(backups []Backup) []BackupLogStatus { - sort.Slice(backups, func(i, j int) bool { - if backups[i].Status.StartTimestamp == nil && backups[j].Status.StartTimestamp != nil { - return false - } - if backups[i].Status.StartTimestamp != nil && backups[j].Status.StartTimestamp == nil { - return true - } - if backups[i].Status.StartTimestamp.Equal(backups[j].Status.StartTimestamp) { - return backups[i].Name < backups[j].Name - } - return backups[i].Status.StartTimestamp.Before(backups[j].Status.StartTimestamp) - }) - getLogfileStartTimeAndStopTime := func() (*metav1.Time, *metav1.Time) { - var ( - startTime *metav1.Time - stopTime *metav1.Time - ) - for _, b := range backups { - if b.Spec.BackupType != BackupTypeLogFile { - continue - } - startTime = b.Status.GetStartTime() - stopTime = b.Status.GetStopTime() - break - } - return startTime, stopTime - } - logfileStartTime, logfileStopTime := getLogfileStartTimeAndStopTime() - // if not exists the startTime/stopTime of the first log file, return - if logfileStartTime.IsZero() || logfileStopTime.IsZero() { - return nil - } - getFirstRecoverableBaseBackup := func() *Backup { - for _, b := range backups { - if !slices.Contains([]BackupType{BackupTypeDataFile, BackupTypeSnapshot}, b.Spec.BackupType) || - 
b.Status.Phase != BackupCompleted { - continue - } - backupStopTime := b.Status.GetStopTime() - // checks if the baseBackup stop time is between logfileStartTime and logfileStopTime. - if !backupStopTime.Before(logfileStartTime) && - backupStopTime.Before(logfileStopTime) { - return &b - } - } - return nil - } - firstRecoverableBaseBackup := getFirstRecoverableBaseBackup() - if firstRecoverableBaseBackup == nil { - return nil +// GetEndTime gets the backup end time. Default return status.completionTimestamp, +// unless status.timeRange.endTime is not nil. +func (r *Backup) GetEndTime() *metav1.Time { + s := r.Status + if s.TimeRange != nil && s.TimeRange.End != nil { + return s.TimeRange.End } - // range of recoverable time - return []BackupLogStatus{{StopTime: logfileStopTime, - StartTime: firstRecoverableBaseBackup.Status.GetStopTime()}} + return s.CompletionTimestamp } diff --git a/apis/dataprotection/v1alpha1/backup_types_test.go b/apis/dataprotection/v1alpha1/backup_types_test.go deleted file mode 100644 index c5db0709711..00000000000 --- a/apis/dataprotection/v1alpha1/backup_types_test.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "testing" - "time" - - . 
"github.com/onsi/gomega" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidate(t *testing.T) { - g := NewGomegaWithT(t) - - backupPolicy := &BackupPolicy{Spec: BackupPolicySpec{Snapshot: &SnapshotPolicy{}}} - backupSpec := &BackupSpec{BackupType: BackupTypeSnapshot} - g.Expect(backupSpec.Validate(backupPolicy)).Should(Succeed()) - - backupPolicy = &BackupPolicy{} - backupSpec = &BackupSpec{BackupType: BackupTypeSnapshot} - g.Expect(backupSpec.Validate(backupPolicy)).Should(HaveOccurred()) - - backupSpec = &BackupSpec{BackupType: BackupTypeDataFile} - g.Expect(backupSpec.Validate(backupPolicy)).Should(HaveOccurred()) - - backupSpec = &BackupSpec{BackupType: BackupTypeLogFile} - g.Expect(backupSpec.Validate(backupPolicy)).Should(HaveOccurred()) -} - -func TestGetRecoverableTimeRange(t *testing.T) { - g := NewGomegaWithT(t) - - // test empty backups - emptyBackups := []Backup{} - g.Expect(GetRecoverableTimeRange(emptyBackups)).Should(BeEmpty()) - - now := metav1.Now() - backupSnapshot := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup-snapshot"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - Phase: BackupCompleted, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StartTime: &now, - StopTime: &now, - }, - }, - }, - } - noStartTimeSnapshotBackup := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup-snapshot1"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - Phase: BackupCompleted, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StopTime: &now, - }, - }, - }, - } - noTimeSnapshotBackup := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup-snapshot2"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - Phase: BackupCompleted, - }, - } - stopTimeGTLogFileStopTimeBaseBackup := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup-snapshot2"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - 
Phase: BackupCompleted, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StartTime: &now, - StopTime: &metav1.Time{Time: now.Add(time.Minute * 61)}, - }, - }, - }, - } - backupLogfile := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup-logfile"}, - Spec: BackupSpec{BackupType: BackupTypeLogFile}, - Status: BackupStatus{ - Phase: BackupCompleted, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StartTime: &now, - StopTime: &metav1.Time{Time: now.Add(time.Hour)}, - }, - }, - }, - } - - backups := []Backup{backupSnapshot, backupLogfile, {}} - g.Expect(GetRecoverableTimeRange(backups)).ShouldNot(BeEmpty()) - - backups = []Backup{noStartTimeSnapshotBackup, backupLogfile, {}} - g.Expect(GetRecoverableTimeRange(backups)).ShouldNot(BeEmpty()) - - backups = []Backup{backupLogfile, noTimeSnapshotBackup} - g.Expect(GetRecoverableTimeRange(backups)).Should(BeEmpty()) - - backups = []Backup{backupLogfile, stopTimeGTLogFileStopTimeBaseBackup} - g.Expect(GetRecoverableTimeRange(backups)).Should(BeEmpty()) -} - -func TestGetStartTime(t *testing.T) { - startTimestamp := metav1.Now() - backTimeRangeStartTime := metav1.Time{Time: time.Now().Add(10 * time.Second)} - backup := Backup{ - ObjectMeta: metav1.ObjectMeta{Name: "backup1"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - Phase: BackupCompleted, - StartTimestamp: &startTimestamp, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StartTime: &backTimeRangeStartTime, - }, - }, - }, - } - g := NewGomegaWithT(t) - g.Expect(backup.Status.GetStartTime().Second()).Should(Equal(backTimeRangeStartTime.Second())) - - backup.Status.Manifests.BackupLog.StartTime = nil - g.Expect(backup.Status.GetStartTime().Second()).Should(Equal(startTimestamp.Second())) -} - -func TestGetStopTime(t *testing.T) { - stopTimestamp := metav1.Now() - backTimeRangeStopTime := metav1.Time{Time: time.Now().Add(10 * time.Second)} - backup := Backup{ - ObjectMeta: 
metav1.ObjectMeta{Name: "backup1"}, - Spec: BackupSpec{BackupType: BackupTypeSnapshot}, - Status: BackupStatus{ - Phase: BackupCompleted, - CompletionTimestamp: &stopTimestamp, - Manifests: &ManifestsStatus{ - BackupLog: &BackupLogStatus{ - StopTime: &backTimeRangeStopTime, - }, - }, - }, - } - g := NewGomegaWithT(t) - g.Expect(backup.Status.GetStopTime().Second()).Should(Equal(backTimeRangeStopTime.Second())) - - backup.Status.Manifests.BackupLog.StopTime = nil - g.Expect(backup.Status.GetStopTime().Second()).Should(Equal(stopTimestamp.Second())) -} diff --git a/apis/dataprotection/v1alpha1/backuppolicy_types.go b/apis/dataprotection/v1alpha1/backuppolicy_types.go index 727cb72d70b..89ecc1a7160 100644 --- a/apis/dataprotection/v1alpha1/backuppolicy_types.go +++ b/apis/dataprotection/v1alpha1/backuppolicy_types.go @@ -17,292 +17,222 @@ limitations under the License. package v1alpha1 import ( - "fmt" - "strconv" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/resource" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // BackupPolicySpec defines the desired state of BackupPolicy type BackupPolicySpec struct { - // retention describe how long the Backup should be retained. if not set, will be retained forever. - // +optional - Retention *RetentionSpec `json:"retention,omitempty"` - - // schedule policy for backup. - // +optional - Schedule Schedule `json:"schedule,omitempty"` - - // the policy for snapshot backup. - // +optional - Snapshot *SnapshotPolicy `json:"snapshot,omitempty"` - - // the policy for datafile backup. - // +optional - Datafile *CommonBackupPolicy `json:"datafile,omitempty"` - - // the policy for logfile backup. + // backupRepoName is the name of BackupRepo and the backup data will be + // stored in this repository. If not set, will be stored in the default + // backup repository. 
+ // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` // +optional - Logfile *CommonBackupPolicy `json:"logfile,omitempty"` -} + BackupRepoName *string `json:"backupRepoName,omitempty"` -type RetentionSpec struct { - // ttl is a time string ending with the 'd'|'D'|'h'|'H' character to describe how long - // the Backup should be retained. if not set, will be retained forever. - // +kubebuilder:validation:Pattern:=`^\d+[d|D|h|H]$` + // pathPrefix is the directory inside the backup repository to store the backup content. + // It is a relative to the path of the backup repository. // +optional - TTL *string `json:"ttl,omitempty"` -} + PathPrefix string `json:"pathPrefix,omitempty"` -type Schedule struct { - // startingDeadlineMinutes defines the deadline in minutes for starting the backup job - // if it misses scheduled time for any reason. + // Specifies the number of retries before marking the backup failed. // +optional // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=1440 - StartingDeadlineMinutes *int64 `json:"startingDeadlineMinutes,omitempty"` - - // schedule policy for snapshot backup. - // +optional - Snapshot *SchedulePolicy `json:"snapshot,omitempty"` - - // schedule policy for datafile backup. - // +optional - Datafile *SchedulePolicy `json:"datafile,omitempty"` - - // schedule policy for logfile backup. - // +optional - Logfile *SchedulePolicy `json:"logfile,omitempty"` -} + // +kubebuilder:validation:Maximum=10 + BackoffLimit *int32 `json:"backoffLimit,omitempty"` -type SchedulePolicy struct { - // the cron expression for schedule, the timezone is in UTC. see https://en.wikipedia.org/wiki/Cron. + // target specifies the target information to back up. // +kubebuilder:validation:Required - CronExpression string `json:"cronExpression"` + Target *BackupTarget `json:"target"` - // enable or disable the schedule. + // backupMethods defines the backup methods. 
// +kubebuilder:validation:Required - Enable bool `json:"enable"` + BackupMethods []BackupMethod `json:"backupMethods"` } -type SnapshotPolicy struct { - BasePolicy `json:",inline"` +type BackupTarget struct { + // podSelector is used to find the target pod. The volumes of the target pod + // will be backed up. + // +kube:validation:Required + PodSelector *PodSelector `json:"podSelector,omitempty"` - // execute hook commands for backup. + // connectionCredential specifies the connection credential to connect to the + // target database cluster. // +optional - Hooks *BackupPolicyHook `json:"hooks,omitempty"` -} + ConnectionCredential *ConnectionCredential `json:"connectionCredential,omitempty"` -type CommonBackupPolicy struct { - BasePolicy `json:",inline"` - - // refer to PersistentVolumeClaim and the backup data will be stored in the corresponding persistent volume. + // resources specifies the kubernetes resources to back up. // +optional - PersistentVolumeClaim PersistentVolumeClaim `json:"persistentVolumeClaim,omitempty"` + Resources *KubeResources `json:"resources,omitempty"` - // refer to BackupRepo and the backup data will be stored in the corresponding repo. - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - // +optional - BackupRepoName *string `json:"backupRepoName,omitempty"` - - // which backup tool to perform database backup, only support one tool. + // serviceAccountName specifies the service account to run the backup workload. // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - BackupToolName string `json:"backupToolName,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` } -type PersistentVolumeClaim struct { - // the name of PersistentVolumeClaim to store backup data. - // +optional - Name *string `json:"name,omitempty"` +type PodSelector struct { + // labelsSelector is the label selector to filter the target pods. 
+ *metav1.LabelSelector `json:",inline"` - // storageClassName is the name of the StorageClass required by the claim. - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - // +optional - StorageClassName *string `json:"storageClassName,omitempty"` + // strategy specifies the strategy to select the target pod when multiple pods + // are selected. + // Valid values are: + // - All: select all pods that match the labelsSelector. + // - Any: select any one pod that match the labelsSelector. + // +kubebuilder:default=Any + Strategy PodSelectionStrategy `json:"strategy,omitempty"` +} - // initCapacity represents the init storage size of the PersistentVolumeClaim which should be created if not exist. - // and the default value is 100Gi if it is empty. - // +optional - InitCapacity resource.Quantity `json:"initCapacity,omitempty"` +// PodSelectionStrategy specifies the strategy to select when multiple pods are +// selected for backup target +// +kubebuilder:validation:Enum=All;Any +type PodSelectionStrategy string - // createPolicy defines the policy for creating the PersistentVolumeClaim, enum values: - // - Never: do nothing if the PersistentVolumeClaim not exists. - // - IfNotPresent: create the PersistentVolumeClaim if not present and the accessModes only contains 'ReadWriteMany'. - // +kubebuilder:default=IfNotPresent - // +optional - CreatePolicy CreatePVCPolicy `json:"createPolicy,omitempty"` - - // persistentVolumeConfigMap references the configmap which contains a persistentVolume template. - // key must be "persistentVolume" and value is the "PersistentVolume" struct. - // support the following built-in Objects: - // - $(GENERATE_NAME): generate a specific format "`PVC NAME`-`PVC NAMESPACE`". - // if the PersistentVolumeClaim not exists and CreatePolicy is "IfNotPresent", the controller - // will create it by this template. this is a mutually exclusive setting with "storageClassName". 
- // +optional - PersistentVolumeConfigMap *PersistentVolumeConfigMap `json:"persistentVolumeConfigMap,omitempty"` -} +const ( + // PodSelectionStrategyAll selects all pods that match the labelsSelector. + PodSelectionStrategyAll PodSelectionStrategy = "All" -type PersistentVolumeConfigMap struct { - // the name of the persistentVolume ConfigMap. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - Name string `json:"name"` + // PodSelectionStrategyAny selects any one pod that match the labelsSelector. + PodSelectionStrategyAny PodSelectionStrategy = "Any" +) - // the namespace of the persistentVolume ConfigMap. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$` - Namespace string `json:"namespace"` -} +// ConnectionCredential specifies the connection credential to connect to the +// target database cluster. +type ConnectionCredential struct { + // secretName refers to the Secret object that contains the connection credential. + // +kube:validation:Required + SecretName string `json:"secretName,omitempty"` -type BasePolicy struct { - // target database cluster for backup. - // +kubebuilder:validation:Required - Target TargetCluster `json:"target"` + // usernameKey specifies the map key of the user in the connection credential secret. + // +kubebuilder:default=username + UsernameKey string `json:"usernameKey,omitempty"` - // the number of automatic backups to retain. Value must be non-negative integer. - // 0 means NO limit on the number of backups. - // +kubebuilder:default=7 - // +optional - BackupsHistoryLimit int32 `json:"backupsHistoryLimit,omitempty"` + // passwordKey specifies the map key of the password in the connection credential secret. + // +kubebuilder:default=password + PasswordKey string `json:"passwordKey,omitempty"` - // count of backup stop retries on fail. 
- // +optional - OnFailAttempted int32 `json:"onFailAttempted,omitempty"` + // hostKey specifies the map key of the host in the connection credential secret. + HostKey string `json:"hostKey,omitempty"` - // define how to update metadata for backup status. - // +optional - BackupStatusUpdates []BackupStatusUpdate `json:"backupStatusUpdates,omitempty"` + // portKey specifies the map key of the port in the connection credential secret. + // +kubebuilder:default=port + PortKey string `json:"portKey,omitempty"` } -// TargetCluster TODO (dsj): target cluster need redefined from Cluster API -type TargetCluster struct { - // labelsSelector is used to find matching pods. - // Pods that match this label selector are counted to determine the number of pods - // in their corresponding topology domain. - // +kubebuilder:validation:Required - // +kubebuilder:pruning:PreserveUnknownFields - LabelsSelector *metav1.LabelSelector `json:"labelsSelector"` +// KubeResources defines the kubernetes resources to back up. +type KubeResources struct { + // selector is a metav1.LabelSelector to filter the target kubernetes resources + // that need to be backed up. + // If not set, will do not back up any kubernetes resources. + // +kube:validation:Required + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // included is a slice of namespaced-scoped resource type names to include in + // the kubernetes resources. + // The default value is "*", which means all resource types will be included. + // +optional + // +kubebuilder:default={"*"} + Included []string `json:"included,omitempty"` - // secret is used to connect to the target database cluster. - // If not set, secret will be inherited from backup policy template. - // if still not set, the controller will check if any system account for dataprotection has been created. + // excluded is a slice of namespaced-scoped resource type names to exclude in + // the kubernetes resources. + // The default value is empty. 
// +optional - Secret *BackupPolicySecret `json:"secret,omitempty"` + Excluded []string `json:"excluded,omitempty"` } -// BackupPolicySecret defines for the target database secret that backup tool can connect. -type BackupPolicySecret struct { - // the secret name +// BackupMethod defines the backup method. +type BackupMethod struct { + // the name of backup method. // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` Name string `json:"name"` - // usernameKey the map key of the user in the connection credential secret - // +kubebuilder:validation:Required - // +kubebuilder:default=username - UsernameKey string `json:"usernameKey,omitempty"` - - // passwordKey the map key of the password in the connection credential secret - // +kubebuilder:validation:Required - // +kubebuilder:default=password - PasswordKey string `json:"passwordKey,omitempty"` -} - -// BackupPolicyHook defines for the database execute commands before and after backup. -type BackupPolicyHook struct { - // pre backup to perform commands + // snapshotVolumes specifies whether to take snapshots of persistent volumes. + // if true, the BackupScript is not required, the controller will use the CSI + // volume snapshotter to create the snapshot. // +optional - PreCommands []string `json:"preCommands,omitempty"` + // +kubebuilder:default=false + SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"` - // post backup to perform commands + // actionSetName refers to the ActionSet object that defines the backup actions. + // For volume snapshot backup, the actionSet is not required, the controller + // will use the CSI volume snapshotter to create the snapshot. // +optional - PostCommands []string `json:"postCommands,omitempty"` + ActionSetName string `json:"actionSetName,omitempty"` - // exec command with image + // targetVolumes specifies which volumes from the target should be mounted in + // the backup workload. 
// +optional - Image string `json:"image,omitempty"` + TargetVolumes *TargetVolumeInfo `json:"targetVolumes,omitempty"` - // which container can exec command + // env specifies the environment variables for the backup workload. // +optional - ContainerName string `json:"containerName,omitempty"` -} - -// BackupStatusUpdateStage defines the stage of backup status update. -// +enum -// +kubebuilder:validation:Enum={pre,post} -type BackupStatusUpdateStage string + Env []corev1.EnvVar `json:"env,omitempty"` -const ( - PRE BackupStatusUpdateStage = "pre" - POST BackupStatusUpdateStage = "post" -) - -type BackupStatusUpdate struct { - // specify the json path of backup object for patch. - // example: manifests.backupLog -- means patch the backup json path of status.manifests.backupLog. + // runtimeSettings specifies runtime settings for the backup workload container. // +optional - Path string `json:"path,omitempty"` + RuntimeSettings *RuntimeSettings `json:"runtimeSettings,omitempty"` +} - // which container name that kubectl can execute. +// TargetVolumeInfo specifies the volumes and their mounts of the targeted application +// that should be mounted in backup workload. +type TargetVolumeInfo struct { + // Volumes indicates the list of volumes of targeted application that should + // be mounted on the backup job. // +optional - ContainerName string `json:"containerName,omitempty"` + Volumes []string `json:"volumes,omitempty"` - // the shell Script commands to collect backup status metadata. - // The script must exist in the container of ContainerName and the output format must be set to JSON. - // Note that outputting to stderr may cause the result format to not be in JSON. + // volumeMounts specifies the mount for the volumes specified in `Volumes` section. 
// +optional - Script string `json:"script,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` +} - // useTargetPodServiceAccount defines whether this job requires the service account of the backup target pod. - // if true, will use the service account of the backup target pod. otherwise, will use the system service account. +type RuntimeSettings struct { + // resources specifies the resource required by container. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional - UseTargetPodServiceAccount bool `json:"useTargetPodServiceAccount,omitempty"` - - // when to update the backup status, pre: before backup, post: after backup - // +kubebuilder:validation:Required - UpdateStage BackupStatusUpdateStage `json:"updateStage"` + Resources corev1.ResourceRequirements `json:"resources,omitempty"` } // BackupPolicyStatus defines the observed state of BackupPolicy type BackupPolicyStatus struct { - - // observedGeneration is the most recent generation observed for this - // BackupPolicy. It corresponds to the Cluster's generation, which is - // updated on mutation by the API Server. + // phase - in list of [Available,Unavailable] // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + Phase Phase `json:"phase,omitempty"` - // backup policy phase valid value: Available, Failed. + // A human-readable message indicating details about why the BackupPolicy is + // in this phase. // +optional - Phase BackupPolicyPhase `json:"phase,omitempty"` + Message string `json:"message,omitempty"` - // the reason if backup policy check failed. + // observedGeneration is the most recent generation observed for this + // BackupPolicy. It refers to the BackupPolicy's generation, which is + // updated on mutation by the API Server. 
// +optional - FailureReason string `json:"failureReason,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} - // information when was the last time the job was successfully scheduled. - // +optional - LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` +// BackupPolicyPhase defines phases for BackupPolicy. +// +enum +// +kubebuilder:validation:Enum={Available,Failed} +type BackupPolicyPhase string - // information when was the last time the job successfully completed. - // +optional - LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` -} +const ( + BackupPolicyAvailable BackupPolicyPhase = "Available" + BackupPolicyFailed BackupPolicyPhase = "Failed" +) // +genclient // +k8s:openapi-gen=true // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:categories={kubeblocks},scope=Namespaced,shortName=bp +// +kubebuilder:printcolumn:name="BACKUP-REPO", type=string, JSONPath=`.spec.backupRepoName` // +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="LAST SCHEDULE",type=string,JSONPath=`.status.lastScheduleTime` // +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=`.metadata.creationTimestamp` -// BackupPolicy is the Schema for the backuppolicies API (defined by User) +// BackupPolicy is the Schema for the backuppolicies API. 
type BackupPolicy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -323,53 +253,3 @@ type BackupPolicyList struct { func init() { SchemeBuilder.Register(&BackupPolicy{}, &BackupPolicyList{}) } - -func (r *BackupPolicySpec) GetCommonPolicy(backupType BackupType) *CommonBackupPolicy { - switch backupType { - case BackupTypeDataFile: - return r.Datafile - case BackupTypeLogFile: - return r.Logfile - } - return nil -} - -func (r *BackupPolicySpec) GetCommonSchedulePolicy(backupType BackupType) *SchedulePolicy { - switch backupType { - case BackupTypeSnapshot: - return r.Schedule.Snapshot - case BackupTypeDataFile: - return r.Schedule.Datafile - case BackupTypeLogFile: - return r.Schedule.Logfile - } - return nil -} - -// ToDuration converts the ttl string to time.Duration. -func ToDuration(ttl *string) time.Duration { - if ttl == nil { - return time.Duration(0) - } - ttlLower := strings.ToLower(*ttl) - if strings.HasSuffix(ttlLower, "d") { - days, _ := strconv.Atoi(strings.ReplaceAll(ttlLower, "d", "")) - return time.Hour * 24 * time.Duration(days) - } - hours, _ := strconv.Atoi(strings.ReplaceAll(ttlLower, "h", "")) - return time.Hour * time.Duration(hours) -} - -// AddTTL adds tll with hours -func AddTTL(ttl *string, hours int) string { - if ttl == nil { - return "" - } - ttlLower := strings.ToLower(*ttl) - if strings.HasSuffix(ttlLower, "d") { - days, _ := strconv.Atoi(strings.ReplaceAll(ttlLower, "d", "")) - return fmt.Sprintf("%dh", days*24+hours) - } - ttlHours, _ := strconv.Atoi(strings.ReplaceAll(ttlLower, "h", "")) - return fmt.Sprintf("%dh", ttlHours+hours) -} diff --git a/apis/dataprotection/v1alpha1/backuppolicy_types_test.go b/apis/dataprotection/v1alpha1/backuppolicy_types_test.go deleted file mode 100644 index b63d26e7b49..00000000000 --- a/apis/dataprotection/v1alpha1/backuppolicy_types_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, 
Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "testing" - "time" - - . "github.com/onsi/gomega" -) - -func expectToDuration(t *testing.T, ttl string, baseNum, targetNum int) { - d := ToDuration(&ttl) - if d != time.Hour*time.Duration(baseNum)*time.Duration(targetNum) { - t.Errorf(`Expected duration is "%d*%d*time.Hour"", got %v`, targetNum, baseNum, d) - } -} - -func TestToDuration(t *testing.T) { - d := ToDuration(nil) - if d != time.Duration(0) { - t.Errorf("Expected duration is 0, got %v", d) - } - expectToDuration(t, "7d", 24, 7) - expectToDuration(t, "7D", 24, 7) - expectToDuration(t, "12h", 1, 12) - expectToDuration(t, "12H", 1, 12) -} - -func TestAddTTL(t *testing.T) { - ttl := "7d" - newTTL := AddTTL(&ttl, 12) - if newTTL != "180h" { - t.Errorf("expected new ttl is 180h, bur got %s", newTTL) - } - ttl = "7h" - newTTL = AddTTL(&ttl, 12) - if newTTL != "19h" { - t.Errorf("expected new ttl is 19h, bur got %s", newTTL) - } -} - -func TestGetCommonPolicy(t *testing.T) { - g := NewGomegaWithT(t) - - policySpec := &BackupPolicySpec{} - g.Expect(policySpec.GetCommonPolicy(BackupTypeSnapshot)).Should(BeNil()) - - policySpec = &BackupPolicySpec{Datafile: &CommonBackupPolicy{}, Logfile: &CommonBackupPolicy{}} - g.Expect(policySpec.GetCommonPolicy(BackupTypeSnapshot)).Should(BeNil()) - g.Expect(policySpec.GetCommonPolicy(BackupTypeDataFile)).ShouldNot(BeNil()) - g.Expect(policySpec.GetCommonPolicy(BackupTypeLogFile)).ShouldNot(BeNil()) -} - -func TestGetCommonSchedulePolicy(t *testing.T) { 
- g := NewGomegaWithT(t) - - policySpec := &BackupPolicySpec{} - g.Expect(policySpec.GetCommonSchedulePolicy(BackupTypeSnapshot)).Should(BeNil()) - - policySpec = &BackupPolicySpec{Schedule: Schedule{ - Snapshot: &SchedulePolicy{}, - Datafile: &SchedulePolicy{}, - Logfile: &SchedulePolicy{}, - }} - g.Expect(policySpec.GetCommonSchedulePolicy(BackupTypeSnapshot)).ShouldNot(BeNil()) - g.Expect(policySpec.GetCommonSchedulePolicy(BackupTypeDataFile)).ShouldNot(BeNil()) - g.Expect(policySpec.GetCommonSchedulePolicy(BackupTypeLogFile)).ShouldNot(BeNil()) -} diff --git a/apis/dataprotection/v1alpha1/backupschedule_types.go b/apis/dataprotection/v1alpha1/backupschedule_types.go new file mode 100644 index 00000000000..a707a2fb167 --- /dev/null +++ b/apis/dataprotection/v1alpha1/backupschedule_types.go @@ -0,0 +1,159 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BackupScheduleSpec defines the desired state of BackupSchedule. +type BackupScheduleSpec struct { + // Which backupPolicy is applied to perform this backup. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + BackupPolicyName string `json:"backupPolicyName"` + + // startingDeadlineMinutes defines the deadline in minutes for starting the + // backup workload if it misses scheduled time for any reason. 
+ // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=1440 + StartingDeadlineMinutes *int64 `json:"startingDeadlineMinutes,omitempty"` + + // schedules defines the list of backup schedules. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Schedules []SchedulePolicy `json:"schedules"` +} + +type SchedulePolicy struct { + // enabled specifies whether the backup schedule is enabled or not. + // +optional + Enabled *bool `json:"enabled,omitempty"` + + // backupMethod specifies the backup method name that is defined in backupPolicy. + // +kubebuilder:validation:Required + BackupMethod string `json:"backupMethod"` + + // the cron expression for schedule, the timezone is in UTC. + // see https://en.wikipedia.org/wiki/Cron. + // +kubebuilder:validation:Required + CronExpression string `json:"cronExpression"` + + // retentionPeriod determines a duration up to which the backup should be kept. + // controller will remove all backups that are older than the RetentionPeriod. + // For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. + // Sample duration format: + // - years: 2y + // - months: 6mo + // - days: 30d + // - hours: 12h + // - minutes: 30m + // You can also combine the above durations. For example: 30d12h30m + // +optional + // +kubebuilder:default="7d" + RetentionPeriod RetentionPeriod `json:"retentionPeriod,omitempty"` +} + +// BackupScheduleStatus defines the observed state of BackupSchedule. +type BackupScheduleStatus struct { + // phase describes the phase of the BackupSchedule. + // +optional + Phase BackupSchedulePhase `json:"phase,omitempty"` + + // observedGeneration is the most recent generation observed for this + // BackupSchedule. It refers to the BackupSchedule's generation, which is + // updated on mutation by the API Server. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // failureReason is an error that caused the backup to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // schedules describes the status of each schedule. + // +optional + Schedules map[string]ScheduleStatus `json:"schedules,omitempty"` +} + +// BackupSchedulePhase defines the phase of BackupSchedule +type BackupSchedulePhase string + +const ( + // BackupSchedulePhaseAvailable means the backup schedule is available. + BackupSchedulePhaseAvailable BackupSchedulePhase = "Available" + + // BackupSchedulePhaseFailed means the backup schedule is failed. + BackupSchedulePhaseFailed BackupSchedulePhase = "Failed" +) + +// ScheduleStatus defines the status of each schedule. +type ScheduleStatus struct { + // phase describes the phase of the schedule. + // +optional + Phase SchedulePhase `json:"phase,omitempty"` + + // failureReason is an error that caused the backup to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // lastScheduleTime records the last time the backup was scheduled. + // +optional + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` + + // lastSuccessfulTime records the last time the backup was successfully completed. 
+ // +optional + LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` +} + +// SchedulePhase defines the phase of schedule +type SchedulePhase string + +const ( + ScheduleRunning SchedulePhase = "Running" + ScheduleFailed SchedulePhase = "Failed" +) + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories={kubeblocks},scope=Namespaced,shortName=bs +// +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=`.metadata.creationTimestamp` + +// BackupSchedule is the Schema for the backupschedules API. +type BackupSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupScheduleSpec `json:"spec,omitempty"` + Status BackupScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupScheduleList contains a list of BackupSchedule. +type BackupScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupSchedule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupSchedule{}, &BackupScheduleList{}) +} diff --git a/apis/dataprotection/v1alpha1/backuptool_types.go b/apis/dataprotection/v1alpha1/backuptool_types.go deleted file mode 100644 index c4e82a2797b..00000000000 --- a/apis/dataprotection/v1alpha1/backuptool_types.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// BackupToolSpec defines the desired state of BackupTool -type BackupToolSpec struct { - // Backup tool Container image name. - // +kubebuilder:validation:Required - Image string `json:"image"` - - // which kind for run a backup tool, supported values: job, statefulSet. - // +kubebuilder:default=job - DeployKind DeployKind `json:"deployKind,omitempty"` - - // the type of backup tool, file or pitr - // +kubebuilder:validation:Enum={file,pitr} - // +kubebuilder:default=file - Type string `json:"type,omitempty"` - - // Compute Resources required by this container. - // Cannot be updated. - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Resources *corev1.ResourceRequirements `json:"resources,omitempty"` - - // List of environment variables to set in the container. - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +kubebuilder:pruning:PreserveUnknownFields - // +optional - EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` - - // Array of command that apps can do database backup. - // from invoke args - // the order of commands follows the order of array. 
- // +kubebuilder:validation:Required - BackupCommands []string `json:"backupCommands"` - - // Array of command that apps can do database incremental backup. - // like xtrabackup, that can performs an incremental backup file. - // +optional - IncrementalBackupCommands []string `json:"incrementalBackupCommands,omitempty"` - - // backup tool can support physical restore, in this case, restore must be RESTART database. - // +optional - Physical *PhysicalConfig `json:"physical,omitempty"` - - // backup tool can support logical restore, in this case, restore NOT RESTART database. - // +optional - Logical *LogicalConfig `json:"logical,omitempty"` -} - -type LogicalConfig struct { - BackupToolRestoreCommand `json:",inline"` - - // podScope defines the pod scope for restore from backup, supported values: - // - 'All' will exec the restore command on all pods. - // - 'ReadWrite' will pick a ReadWrite pod to exec the restore command. - // +optional - // +kubebuilder:default=All - PodScope PodRestoreScope `json:"podScope,omitempty"` -} - -type PhysicalConfig struct { - BackupToolRestoreCommand `json:",inline"` - - // relyOnLogfile defines whether the current recovery relies on log files - // +optional - RelyOnLogfile bool `json:"relyOnLogfile,omitempty"` -} - -// BackupToolRestoreCommand defines the restore commands of BackupTool -type BackupToolRestoreCommand struct { - // Array of command that apps can perform database restore. - // like xtrabackup, that can performs restore mysql from files. - // +optional - RestoreCommands []string `json:"restoreCommands"` - - // Array of incremental restore commands. - // +optional - IncrementalRestoreCommands []string `json:"incrementalRestoreCommands,omitempty"` -} - -// BackupToolStatus defines the observed state of BackupTool -type BackupToolStatus struct { - // TODO(dsj): define backup tool status. 
-} - -// +genclient -// +genclient:nonNamespaced -// +k8s:openapi-gen=true -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:categories={kubeblocks},scope=Cluster - -// BackupTool is the Schema for the backuptools API (defined by provider) -type BackupTool struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec BackupToolSpec `json:"spec,omitempty"` - Status BackupToolStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// BackupToolList contains a list of BackupTool -type BackupToolList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []BackupTool `json:"items"` -} - -func init() { - SchemeBuilder.Register(&BackupTool{}, &BackupToolList{}) -} - -func (physical *PhysicalConfig) GetPhysicalRestoreCommand() []string { - if physical == nil || len(physical.RestoreCommands) == 0 { - return nil - } - return physical.RestoreCommands -} - -func (physical *PhysicalConfig) IsRelyOnLogfile() bool { - return physical != nil && physical.RelyOnLogfile -} - -func (logical *LogicalConfig) GetLogicalRestoreCommand() []string { - if logical == nil || len(logical.RestoreCommands) == 0 { - return nil - } - return logical.RestoreCommands -} diff --git a/apis/dataprotection/v1alpha1/restore_types.go b/apis/dataprotection/v1alpha1/restore_types.go new file mode 100644 index 00000000000..fa9d110a1e5 --- /dev/null +++ b/apis/dataprotection/v1alpha1/restore_types.go @@ -0,0 +1,438 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RestoreSpec defines the desired state of Restore +type RestoreSpec struct { + // backup name, the following behavior based on the backup type: + // 1. Full: will be restored the full backup directly. + // 2. Incremental: will be restored sequentially from the most recent full backup of this incremental backup. + // 3. Differential: will be restored sequentially from the parent backup of the differential backup. + // 4. Continuous: will find the most recent full backup at this time point and the input continuous backup to restore. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.backupName" + Backup BackupConfig `json:"backup"` + + // restore according to a specified point in time. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.restoreTime" + // +optional + // +kubebuilder:validation:Pattern=`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$` + RestoreTime string `json:"restoreTime,omitempty"` + + // restore the specified resources of kubernetes. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.resources" + // +optional + Resources *RestoreKubeResources `json:"resources,omitempty"` + + // configuration for the action of "prepareData" phase, including the persistent volume claims + // that need to be restored and scheduling strategy of temporary recovery pod. 
+ // +optional + PrepareDataConfig *PrepareDataConfig `json:"prepareDataConfig,omitempty"` + + // service account name which needs for recovery pod. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // configuration for the action of "postReady" phase. + // +kubebuilder:validation:XValidation:rule="has(self.jobAction) || has(self.execAction)", message="at least one exists for jobAction and execAction." + // +optional + ReadyConfig *ReadyConfig `json:"readyConfig,omitempty"` + + // list of environment variables to set in the container for restore and will be merged with the env of Backup and ActionSet. + // the priority of merging is as follows: Restore env > Backup env > ActionSet env. + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // specified the required resources of restore job's container. + // +optional + ContainerResources corev1.ResourceRequirements `json:"containerResources,omitempty"` +} + +type BackupConfig struct { + // backup name + // +kubebuilder:validation:Required + Name string `json:"name"` + + // backup namespace + // +kubebuilder:validation:Required + Namespace string `json:"namespace"` +} + +type RestoreKubeResources struct { + // will restore the specified resources + IncludeResources []IncludeResource `json:"included,omitempty"` + + // TODO: supports exclude resources for recovery +} + +type IncludeResource struct { + // + // +kubebuilder:validation:Required + GroupResource string `json:"groupResource"` + + // select the specified resource for recovery by label. + // +optional + LabelSelector metav1.LabelSelector `json:"labelSelector,omitempty"` +} + +type PrepareDataConfig struct { + // dataSourceRef describes the configuration when using `persistentVolumeClaim.spec.dataSourceRef` method for restoring. 
+ // it describes the source volume of the backup targetVolumes and how to mount path in the restoring container. + // +kubebuilder:validation:XValidation:rule="self.volumeSource != '' || self.mountPath !=''",message="at least one exists for volumeSource and mountPath." + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.prepareDataConfig.dataSourceRef" + // +optional + DataSourceRef *VolumeConfig `json:"dataSourceRef,omitempty"` + + // volumeClaims defines the persistent Volume claims that need to be restored and mount them together into the restore job. + // these persistent Volume claims will be created if not exist. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.prepareDataConfig.volumeClaims" + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +optional + RestoreVolumeClaims []RestoreVolumeClaim `json:"volumeClaims,omitempty"` + + // volumeClaimsTemplate defines a template to build persistent Volume claims that need to be restored. + // these claims will be created in an orderly manner based on the number of replicas or reused if already exist. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.prepareDataConfig.volumeClaimsTemplate" + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +optional + RestoreVolumeClaimsTemplate *RestoreVolumeClaimsTemplate `json:"volumeClaimsTemplate,omitempty"` + + // VolumeClaimManagementPolicy defines recovery strategy for persistent volume claim. supported policies are as follows: + // 1. Parallel: parallel recovery of persistent volume claim. + // 2. Serial: restore the persistent volume claim in sequence, and wait until the previous persistent volume claim is restored before restoring a new one. 
+ // +kubebuilder:default=Parallel + // +kubebuilder:validation:Required + VolumeClaimManagementPolicy VolumeClaimManagementPolicy `json:"volumeClaimManagementPolicy"` + + // scheduling spec for restoring pod. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.prepareDataConfig.schedulingSpec" + // +optional + SchedulingSpec SchedulingSpec `json:"schedulingSpec,omitempty"` +} + +type ReadyConfig struct { + // configuration for job action. + // +optional + JobAction *JobAction `json:"jobAction,omitempty"` + + // configuration for exec action. + // +optional + ExecAction *ExecAction `json:"execAction,omitempty"` + + // credential template used for creating a connection credential + // +optional + ConnectCredential *ConnectCredential `json:"connectCredential,omitempty"` + + // periodic probe of the service readiness. + // controller will perform postReadyHooks of BackupScript.spec.restore after the service readiness when readinessProbe is configured. + // +optional + ReadinessProbe *ReadinessProbe `json:"readinessProbe,omitempty"` +} + +type JobAction struct { + // jobActionTarget defines the pod that need to be executed for the job action. + // will select a pod that meets the conditions to execute. + // +kubebuilder:validation:Required + Target JobActionTarget `json:"target"` +} + +type ExecAction struct { + // execActionTarget defines the pods that need to be executed for the exec action. + // will execute on all pods that meet the conditions. + // +optional + Target ExecActionTarget `json:"target"` +} + +type ExecActionTarget struct { + // kubectl exec in all selected pods. + // +kubebuilder:validation:Required + PodSelector metav1.LabelSelector `json:"podSelector"` +} + +type JobActionTarget struct { + // select one of the pods which selected by labels to build the job spec, such as mount required volumes and inject built-in env of the selected pod. 
+ // +kubebuilder:validation:Required + PodSelector metav1.LabelSelector `json:"podSelector"` + + // volumeMounts defines which volumes of the selected pod need to be mounted on the restoring pod. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +optional + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` +} + +type VolumeConfig struct { + // volumeSource describes the volume will be restored from the specified volume of the backup targetVolumes. + // required if the backup uses volume snapshot. + // +optional + VolumeSource string `json:"volumeSource,omitempty"` + + // mountPath path within the restoring container at which the volume should be mounted. + // +optional + MountPath string `json:"mountPath,omitempty"` +} + +type RestoreVolumeClaim struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +kubebuilder:validation:Required + metav1.ObjectMeta `json:"metadata"` + + // volumeClaimSpec defines the desired characteristics of a persistent volume claim. + // +kubebuilder:validation:Required + VolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"volumeClaimSpec"` + + // describing the source volume of the backup targetVolumes and how to mount path in the restoring container. + // +kubebuilder:validation:XValidation:rule="self.volumeSource != '' || self.mountPath !=''",message="at least one exists for volumeSource and mountPath." + VolumeConfig `json:",inline"` +} + +type RestoreVolumeClaimsTemplate struct { + // templates is a list of volume claims. + // +kubebuilder:validation:Required + Templates []RestoreVolumeClaim `json:"templates"` + + // the replicas of persistent volume claim which need to be created and restored. + // the format of created claim name is "-". 
+ // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Required + Replicas int32 `json:"replicas"` + + // the starting index for the created persistent volume claim by according to template. + // minimum is 0. + // +kubebuilder:validation:Minimum=0 + StartingIndex int32 `json:"startingIndex,omitempty"` +} + +type SchedulingSpec struct { + // the restoring pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // nodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + // +mapType=atomic + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // nodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty"` + + // affinity is a group of affinity scheduling rules. + // refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // topologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. 
+ // +optional + SchedulerName string `json:"schedulerName,omitempty"` +} + +type ConnectCredential struct { + // the secret name + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + SecretName string `json:"secretName"` + + // usernameKey the map key of the user in the connection credential secret + // +kubebuilder:validation:Required + // +kubebuilder:default=username + UsernameKey string `json:"usernameKey"` + + // passwordKey the map key of the password in the connection credential secret + // +kubebuilder:validation:Required + // +kubebuilder:default=password + PasswordKey string `json:"passwordKey"` + + // hostKey the map key of the host in the connection credential secret + // +kubebuilder:default=host + HostKey string `json:"hostKey,omitempty"` + + // portKey the map key of the port in the connection credential secret + // +kubebuilder:default=port + PortKey string `json:"portKey,omitempty"` +} + +type ReadinessProbe struct { + // number of seconds after the container has started before probe is initiated. + // +optional + // +kubebuilder:validation:Minimum=0 + InitialDelaySeconds int `json:"initialDelaySeconds,omitempty"` + + // number of seconds after which the probe times out. + // defaults to 30 second, minimum value is 1. + // +optional + // +kubebuilder:default=30 + // +kubebuilder:validation:Minimum=1 + TimeoutSeconds int `json:"timeoutSeconds"` + + // how often (in seconds) to perform the probe. + // defaults to 5 second, minimum value is 1. + // +optional + // +kubebuilder:default=5 + // +kubebuilder:validation:Minimum=1 + PeriodSeconds int `json:"periodSeconds"` + + // exec specifies the action to take. + // +kubebuilder:validation:Required + Exec ReadinessProbeExecAction `json:"exec"` + + // TODO: support readiness probe by checking k8s resource +} + +type ReadinessProbeExecAction struct { + // refer to container image. 
+ // +kubebuilder:validation:Required + Image string `json:"image"` + + // refer to container command. + // +kubebuilder:validation:Required + Command []string `json:"command"` +} + +type RestoreStatusActions struct { + // record the actions for prepareData phase. + // +patchMergeKey=jobName + // +patchStrategy=merge,retainKeys + // +optional + PrepareData []RestoreStatusAction `json:"prepareData,omitempty"` + + // record the actions for postReady phase. + // +patchMergeKey=jobName + // +patchStrategy=merge,retainKeys + // +optional + PostReady []RestoreStatusAction `json:"postReady,omitempty"` +} + +type RestoreStatusAction struct { + // name describes the name of the recovery action based on the current backup. + // +kubebuilder:validation:Required + Name string `json:"name"` + + // which backup's restore action belongs to. + // +kubebuilder:validation:Required + BackupName string `json:"backupName"` + + // the execution object of the restore action. + // +kubebuilder:validation:Required + ObjectKey string `json:"objectKey"` + + // message is a human readable message indicating details about the object condition. + // +optional + Message string `json:"message,omitempty"` + + // the status of this action. + // +kubebuilder:validation:Required + Status RestoreActionStatus `json:"status,omitempty"` + + // startTime is the start time for the restore job. + // +optional + StartTime metav1.Time `json:"startTime,omitempty"` + + // endTime is the completion time for the restore job. + // +optional + EndTime metav1.Time `json:"endTime,omitempty"` +} + +// RestoreStatus defines the observed state of Restore +type RestoreStatus struct { + // +optional + Phase RestorePhase `json:"phase,omitempty"` + + // Date/time when the restore started being processed. + // +optional + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // Date/time when the restore finished being processed. 
+ // +optional + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // The duration time of restore execution. + // When converted to a string, the form is "1h2m0.5s". + // +optional + Duration *metav1.Duration `json:"duration,omitempty"` + + // recorded all restore actions performed. + // +optional + Actions RestoreStatusActions `json:"actions,omitempty"` + + // describe current state of restore API Resource, like warning. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories={kubeblocks,all} +// +kubebuilder:printcolumn:name="BACKUP",type="string",JSONPath=".spec.backup.name" +// +kubebuilder:printcolumn:name="RESTORE-TIME",type="string",JSONPath=".spec.restoreTime",description="Point in time for restoring" +// +kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.phase",description="Restore Status." 
+// +kubebuilder:printcolumn:name="DURATION",type=string,JSONPath=".status.duration" +// +kubebuilder:printcolumn:name="CREATE-TIME",type=string,JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="COMPLETION-TIME",type=string,JSONPath=".status.completionTimestamp" + +// Restore is the Schema for the restores API +type Restore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RestoreSpec `json:"spec,omitempty"` + Status RestoreStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// RestoreList contains a list of Restore +type RestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Restore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Restore{}, &RestoreList{}) +} +func (p *PrepareDataConfig) IsSerialPolicy() bool { + if p == nil { + return false + } + return p.VolumeClaimManagementPolicy == SerialManagementPolicy +} diff --git a/apis/dataprotection/v1alpha1/restorejob_types.go b/apis/dataprotection/v1alpha1/restorejob_types.go deleted file mode 100644 index a48c3f8afba..00000000000 --- a/apis/dataprotection/v1alpha1/restorejob_types.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// RestoreJobSpec defines the desired state of RestoreJob -type RestoreJobSpec struct { - // Specified one backupJob to restore. - // +kubebuilder:validation:Required - BackupJobName string `json:"backupJobName"` - - // the target database workload to restore - // +kubebuilder:validation:Required - Target TargetCluster `json:"target"` - - // array of restore volumes . - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:pruning:PreserveUnknownFields - TargetVolumes []corev1.Volume `json:"targetVolumes" patchStrategy:"merge,retainKeys" patchMergeKey:"name"` - - // array of restore volume mounts . - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:pruning:PreserveUnknownFields - TargetVolumeMounts []corev1.VolumeMount `json:"targetVolumeMounts" patchStrategy:"merge" patchMergeKey:"mountPath"` - - // count of backup stop retries on fail. - // +optional - OnFailAttempted int32 `json:"onFailAttempted,omitempty"` -} - -// RestoreJobStatus defines the observed state of RestoreJob -type RestoreJobStatus struct { - - // +optional - Phase RestoreJobPhase `json:"phase,omitempty"` - - // The date and time when the Backup is eligible for garbage collection. - // 'null' means the Backup is NOT be cleaned except delete manual. - // +optional - Expiration *metav1.Time `json:"expiration,omitempty"` - - // Date/time when the backup started being processed. - // +optional - StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` - - // Date/time when the backup finished being processed. - // +optional - CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` - - // Job failed reason. 
- // +optional - FailureReason string `json:"failureReason,omitempty"` -} - -// +genclient -// +k8s:openapi-gen=true -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:categories={kubeblocks},scope=Namespaced -// +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="COMPLETION-TIME",type=date,JSONPath=`.status.completionTimestamp` -// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=`.metadata.creationTimestamp` - -// RestoreJob is the Schema for the restorejobs API (defined by User) -type RestoreJob struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec RestoreJobSpec `json:"spec,omitempty"` - Status RestoreJobStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// RestoreJobList contains a list of RestoreJob -type RestoreJobList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []RestoreJob `json:"items"` -} - -func init() { - SchemeBuilder.Register(&RestoreJob{}, &RestoreJobList{}) -} diff --git a/apis/dataprotection/v1alpha1/types.go b/apis/dataprotection/v1alpha1/types.go index 48e0c97c686..979f13c5e39 100644 --- a/apis/dataprotection/v1alpha1/types.go +++ b/apis/dataprotection/v1alpha1/types.go @@ -16,107 +16,209 @@ limitations under the License. package v1alpha1 -// BackupPhase The current phase. Valid values are New, InProgress, Completed, Failed. 
-// +enum -// +kubebuilder:validation:Enum={New,InProgress,Running,Completed,Failed,Deleting} -type BackupPhase string - -const ( - BackupNew BackupPhase = "New" - BackupInProgress BackupPhase = "InProgress" - BackupRunning BackupPhase = "Running" - BackupCompleted BackupPhase = "Completed" - BackupFailed BackupPhase = "Failed" - BackupDeleting BackupPhase = "Deleting" +import ( + "errors" + "strconv" + "strings" + "time" + "unicode" ) -// BackupType the backup type, marked backup set is datafile or logfile or snapshot. +// Phase defines the BackupPolicy and ActionSet CR .status.phase // +enum -// +kubebuilder:validation:Enum={datafile,logfile,snapshot} -type BackupType string +// +kubebuilder:validation:Enum={Available,Unavailable} +type Phase string const ( - BackupTypeDataFile BackupType = "datafile" - BackupTypeLogFile BackupType = "logfile" - BackupTypeSnapshot BackupType = "snapshot" + AvailablePhase Phase = "Available" + UnavailablePhase Phase = "Unavailable" ) -// BackupMethod the backup method -// +enum -// +kubebuilder:validation:Enum={snapshot,backupTool} -type BackupMethod string - -const ( - BackupMethodSnapshot BackupMethod = "snapshot" - BackupMethodBackupTool BackupMethod = "backupTool" -) +func (p Phase) IsAvailable() bool { + return p == AvailablePhase +} -// BaseBackupType the base backup type. -// +enum -// +kubebuilder:validation:Enum={full,snapshot} -type BaseBackupType string - -// CreatePVCPolicy the policy how to create the PersistentVolumeClaim for backup. -// +enum -// +kubebuilder:validation:Enum={IfNotPresent,Never} -type CreatePVCPolicy string - -const ( - CreatePVCPolicyNever CreatePVCPolicy = "Never" - CreatePVCPolicyIfNotPresent CreatePVCPolicy = "IfNotPresent" -) - -// BackupPolicyPhase defines phases for BackupPolicy CR. +// BackupRepoPhase defines phases for BackupRepo CR. 
// +enum -// +kubebuilder:validation:Enum={Available,Failed} -type BackupPolicyPhase string +// +kubebuilder:validation:Enum={PreChecking,Failed,Ready,Deleting} +type BackupRepoPhase string const ( - PolicyAvailable BackupPolicyPhase = "Available" - PolicyFailed BackupPolicyPhase = "Failed" + BackupRepoPreChecking BackupRepoPhase = "PreChecking" + BackupRepoFailed BackupRepoPhase = "Failed" + BackupRepoReady BackupRepoPhase = "Ready" + BackupRepoDeleting BackupRepoPhase = "Deleting" ) -// RestoreJobPhase The current phase. Valid values are New, InProgressPhy, InProgressLogic, Completed, Failed. +// RetentionPeriod represents a duration in the format "1y2mo3w4d5h6m", where +// y=year, mo=month, w=week, d=day, h=hour, m=minute. +type RetentionPeriod string + +// ToDuration converts the RetentionPeriod to time.Duration. +func (r RetentionPeriod) ToDuration() (time.Duration, error) { + if len(r.String()) == 0 { + return time.Duration(0), nil + } + + minutes, err := r.toMinutes() + if err != nil { + return time.Duration(0), err + } + return time.Minute * time.Duration(minutes), nil +} + +func (r RetentionPeriod) String() string { + return string(r) +} + +func (r RetentionPeriod) toMinutes() (int, error) { + d, err := r.parseDuration() + if err != nil { + return 0, err + } + minutes := d.Minutes + minutes += d.Hours * 60 + minutes += d.Days * 24 * 60 + minutes += d.Weeks * 7 * 24 * 60 + minutes += d.Months * 30 * 24 * 60 + minutes += d.Years * 365 * 24 * 60 + return minutes, nil +} + +type duration struct { + Minutes int + Hours int + Days int + Weeks int + Months int + Years int +} + +var errInvalidDuration = errors.New("invalid duration provided") + +// parseDuration parses a duration from a string. 
The format is `6y5m234d37h` +func (r RetentionPeriod) parseDuration() (duration, error) { + var ( + d duration + num int + err error + ) + + s := strings.TrimSpace(r.String()) + for s != "" { + num, s, err = r.nextNumber(s) + if err != nil { + return duration{}, err + } + + if len(s) == 0 { + return duration{}, errInvalidDuration + } + + if len(s) > 1 && s[0] == 'm' && s[1] == 'o' { + d.Months = num + s = s[2:] + continue + } + + switch s[0] { + case 'y': + d.Years = num + case 'w': + d.Weeks = num + case 'd': + d.Days = num + case 'h': + d.Hours = num + case 'm': + d.Minutes = num + default: + return duration{}, errInvalidDuration + } + s = s[1:] + } + return d, nil +} + +func (r RetentionPeriod) nextNumber(input string) (num int, rest string, err error) { + if len(input) == 0 { + return 0, "", nil + } + + var ( + n string + negative bool + ) + + if input[0] == '-' { + negative = true + input = input[1:] + } + + for i, s := range input { + if !unicode.IsNumber(s) { + rest = input[i:] + break + } + + n += string(s) + } + + if len(n) == 0 { + return 0, input, errInvalidDuration + } + + num, err = strconv.Atoi(n) + if err != nil { + return 0, input, err + } + + if negative { + num = -num + } + return num, rest, nil +} + +// RestorePhase The current phase. Valid values are Running, Completed, Failed, Deleting. 
// +enum -// +kubebuilder:validation:Enum={New,InProgressPhy,InProgressLogic,Completed,Failed} -type RestoreJobPhase string +// +kubebuilder:validation:Enum={Running,Completed,Failed,Deleting} +type RestorePhase string const ( - RestoreJobNew RestoreJobPhase = "New" - RestoreJobInProgressPhy RestoreJobPhase = "InProgressPhy" - RestoreJobInProgressLogic RestoreJobPhase = "InProgressLogic" - RestoreJobCompleted RestoreJobPhase = "Completed" - RestoreJobFailed RestoreJobPhase = "Failed" + RestorePhaseRunning RestorePhase = "Running" + RestorePhaseCompleted RestorePhase = "Completed" + RestorePhaseFailed RestorePhase = "Failed" + RestorePhaseDeleting RestorePhase = "Deleting" ) -// DeployKind which kind for run a backup tool. +// RestoreActionStatus the status of restore action. // +enum -// +kubebuilder:validation:Enum={job,statefulSet} -type DeployKind string +// +kubebuilder:validation:Enum={Processing,Completed,Failed} +type RestoreActionStatus string const ( - DeployKindJob DeployKind = "job" - DeployKindStatefulSet DeployKind = "statefulSet" + RestoreActionProcessing RestoreActionStatus = "Processing" + RestoreActionCompleted RestoreActionStatus = "Completed" + RestoreActionFailed RestoreActionStatus = "Failed" ) -// PodRestoreScope defines the scope pod for restore from backup. -// +enum -// +kubebuilder:validation:Enum={All,ReadWrite} -type PodRestoreScope string +type RestoreStage string const ( - PodRestoreScopeAll = "All" - PodRestoreScopeReadWrite = "ReadWrite" + PrepareData RestoreStage = "prepareData" + PostReady RestoreStage = "postReady" ) -// BackupRepoPhase defines phases for BackupRepo CR. +// VolumeClaimManagementPolicy defines recovery strategy for persistent volume claim. +// Supported policies are as follows: +// 1. Parallel: parallel recovery of persistent volume claim. +// 2. Serial: restore the persistent volume claim in sequence, and wait until the +// previous persistent volume claim is restored before restoring a new one. 
// +enum -// +kubebuilder:validation:Enum={PreChecking,Failed,Ready,Deleting} -type BackupRepoPhase string +// +kubebuilder:validation:Enum={Parallel,Serial} +type VolumeClaimManagementPolicy string const ( - BackupRepoPreChecking BackupRepoPhase = "PreChecking" - BackupRepoFailed BackupRepoPhase = "Failed" - BackupRepoReady BackupRepoPhase = "Ready" - BackupRepoDeleting BackupRepoPhase = "Deleting" + ParallelManagementPolicy VolumeClaimManagementPolicy = "Parallel" + SerialManagementPolicy VolumeClaimManagementPolicy = "Serial" ) diff --git a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go index 408d51001f1..19444298669 100644 --- a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go +++ b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go @@ -25,32 +25,32 @@ along with this program. If not, see . package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Backup) DeepCopyInto(out *Backup) { +func (in *ActionSet) DeepCopyInto(out *ActionSet) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. -func (in *Backup) DeepCopy() *Backup { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionSet. 
+func (in *ActionSet) DeepCopy() *ActionSet { if in == nil { return nil } - out := new(Backup) + out := new(ActionSet) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Backup) DeepCopyObject() runtime.Object { +func (in *ActionSet) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -58,31 +58,31 @@ func (in *Backup) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupList) DeepCopyInto(out *BackupList) { +func (in *ActionSetList) DeepCopyInto(out *ActionSetList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Backup, len(*in)) + *out = make([]ActionSet, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. -func (in *BackupList) DeepCopy() *BackupList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionSetList. +func (in *ActionSetList) DeepCopy() *ActionSetList { if in == nil { return nil } - out := new(BackupList) + out := new(ActionSetList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BackupList) DeepCopyObject() runtime.Object { +func (in *ActionSetList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -90,49 +90,148 @@ func (in *BackupList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupLogStatus) DeepCopyInto(out *BackupLogStatus) { +func (in *ActionSetSpec) DeepCopyInto(out *ActionSetSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupActionSpec) + (*in).DeepCopyInto(*out) + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreActionSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionSetSpec. +func (in *ActionSetSpec) DeepCopy() *ActionSetSpec { + if in == nil { + return nil + } + out := new(ActionSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionSetStatus) DeepCopyInto(out *ActionSetStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionSetStatus. +func (in *ActionSetStatus) DeepCopy() *ActionSetStatus { + if in == nil { + return nil + } + out := new(ActionSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionSpec) DeepCopyInto(out *ActionSpec) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecActionSpec) + (*in).DeepCopyInto(*out) + } + if in.Job != nil { + in, out := &in.Job, &out.Job + *out = new(JobActionSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionSpec. 
+func (in *ActionSpec) DeepCopy() *ActionSpec { + if in == nil { + return nil + } + out := new(ActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionStatus) DeepCopyInto(out *ActionStatus) { *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp *out = (*in).DeepCopy() } - if in.StopTime != nil { - in, out := &in.StopTime, &out.StopTime + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp *out = (*in).DeepCopy() } + if in.AvailableReplicas != nil { + in, out := &in.AvailableReplicas, &out.AvailableReplicas + *out = new(int32) + **out = **in + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.TimeRange != nil { + in, out := &in.TimeRange, &out.TimeRange + *out = new(BackupTimeRange) + (*in).DeepCopyInto(*out) + } + if in.VolumeSnapshots != nil { + in, out := &in.VolumeSnapshots, &out.VolumeSnapshots + *out = make([]VolumeSnapshotStatus, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupLogStatus. -func (in *BackupLogStatus) DeepCopy() *BackupLogStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionStatus. +func (in *ActionStatus) DeepCopy() *ActionStatus { if in == nil { return nil } - out := new(BackupLogStatus) + out := new(ActionStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupPolicy) DeepCopyInto(out *BackupPolicy) { +func (in *Backup) DeepCopyInto(out *Backup) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicy. -func (in *BackupPolicy) DeepCopy() *BackupPolicy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { if in == nil { return nil } - out := new(BackupPolicy) + out := new(Backup) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BackupPolicy) DeepCopyObject() runtime.Object { +func (in *Backup) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -140,56 +239,101 @@ func (in *BackupPolicy) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupPolicyHook) DeepCopyInto(out *BackupPolicyHook) { +func (in *BackupActionSpec) DeepCopyInto(out *BackupActionSpec) { *out = *in - if in.PreCommands != nil { - in, out := &in.PreCommands, &out.PreCommands - *out = make([]string, len(*in)) - copy(*out, *in) + if in.BackupData != nil { + in, out := &in.BackupData, &out.BackupData + *out = new(BackupDataActionSpec) + (*in).DeepCopyInto(*out) } - if in.PostCommands != nil { - in, out := &in.PostCommands, &out.PostCommands - *out = make([]string, len(*in)) - copy(*out, *in) + if in.PreBackup != nil { + in, out := &in.PreBackup, &out.PreBackup + *out = make([]ActionSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostBackup != nil { + in, out := &in.PostBackup, &out.PostBackup + *out = make([]ActionSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyHook. -func (in *BackupPolicyHook) DeepCopy() *BackupPolicyHook { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupActionSpec. +func (in *BackupActionSpec) DeepCopy() *BackupActionSpec { if in == nil { return nil } - out := new(BackupPolicyHook) + out := new(BackupActionSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupPolicyList) DeepCopyInto(out *BackupPolicyList) { +func (in *BackupConfig) DeepCopyInto(out *BackupConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig. +func (in *BackupConfig) DeepCopy() *BackupConfig { + if in == nil { + return nil + } + out := new(BackupConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupDataActionSpec) DeepCopyInto(out *BackupDataActionSpec) { + *out = *in + in.JobActionSpec.DeepCopyInto(&out.JobActionSpec) + if in.SyncProgress != nil { + in, out := &in.SyncProgress, &out.SyncProgress + *out = new(SyncProgress) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupDataActionSpec. +func (in *BackupDataActionSpec) DeepCopy() *BackupDataActionSpec { + if in == nil { + return nil + } + out := new(BackupDataActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]BackupPolicy, len(*in)) + *out = make([]Backup, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyList. -func (in *BackupPolicyList) DeepCopy() *BackupPolicyList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { if in == nil { return nil } - out := new(BackupPolicyList) + out := new(BackupList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BackupPolicyList) DeepCopyObject() runtime.Object { +func (in *BackupList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -197,43 +341,125 @@ func (in *BackupPolicyList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupPolicySecret) DeepCopyInto(out *BackupPolicySecret) { +func (in *BackupMethod) DeepCopyInto(out *BackupMethod) { *out = *in + if in.SnapshotVolumes != nil { + in, out := &in.SnapshotVolumes, &out.SnapshotVolumes + *out = new(bool) + **out = **in + } + if in.TargetVolumes != nil { + in, out := &in.TargetVolumes, &out.TargetVolumes + *out = new(TargetVolumeInfo) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RuntimeSettings != nil { + in, out := &in.RuntimeSettings, &out.RuntimeSettings + *out = new(RuntimeSettings) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicySecret. -func (in *BackupPolicySecret) DeepCopy() *BackupPolicySecret { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupMethod. +func (in *BackupMethod) DeepCopy() *BackupMethod { if in == nil { return nil } - out := new(BackupPolicySecret) + out := new(BackupMethod) in.DeepCopyInto(out) return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicy) DeepCopyInto(out *BackupPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicy. +func (in *BackupPolicy) DeepCopy() *BackupPolicy { + if in == nil { + return nil + } + out := new(BackupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyList) DeepCopyInto(out *BackupPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyList. +func (in *BackupPolicyList) DeepCopy() *BackupPolicyList { + if in == nil { + return nil + } + out := new(BackupPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupPolicySpec) DeepCopyInto(out *BackupPolicySpec) { *out = *in - if in.Retention != nil { - in, out := &in.Retention, &out.Retention - *out = new(RetentionSpec) - (*in).DeepCopyInto(*out) + if in.BackupRepoName != nil { + in, out := &in.BackupRepoName, &out.BackupRepoName + *out = new(string) + **out = **in } - in.Schedule.DeepCopyInto(&out.Schedule) - if in.Snapshot != nil { - in, out := &in.Snapshot, &out.Snapshot - *out = new(SnapshotPolicy) - (*in).DeepCopyInto(*out) + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + *out = new(int32) + **out = **in } - if in.Datafile != nil { - in, out := &in.Datafile, &out.Datafile - *out = new(CommonBackupPolicy) + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(BackupTarget) (*in).DeepCopyInto(*out) } - if in.Logfile != nil { - in, out := &in.Logfile, &out.Logfile - *out = new(CommonBackupPolicy) - (*in).DeepCopyInto(*out) + if in.BackupMethods != nil { + in, out := &in.BackupMethods, &out.BackupMethods + *out = make([]BackupMethod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } @@ -250,14 +476,6 @@ func (in *BackupPolicySpec) DeepCopy() *BackupPolicySpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupPolicyStatus) DeepCopyInto(out *BackupPolicyStatus) { *out = *in - if in.LastScheduleTime != nil { - in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = (*in).DeepCopy() - } - if in.LastSuccessfulTime != nil { - in, out := &in.LastSuccessfulTime, &out.LastSuccessfulTime - *out = (*in).DeepCopy() - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyStatus. 
@@ -342,7 +560,7 @@ func (in *BackupRepoSpec) DeepCopyInto(out *BackupRepoSpec) { } if in.Credential != nil { in, out := &in.Credential, &out.Credential - *out = new(corev1.SecretReference) + *out = new(v1.SecretReference) **out = **in } } @@ -362,14 +580,14 @@ func (in *BackupRepoStatus) DeepCopyInto(out *BackupRepoStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.GeneratedCSIDriverSecret != nil { in, out := &in.GeneratedCSIDriverSecret, &out.GeneratedCSIDriverSecret - *out = new(corev1.SecretReference) + *out = new(v1.SecretReference) **out = **in } } @@ -385,16 +603,109 @@ func (in *BackupRepoStatus) DeepCopy() *BackupRepoStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupSnapshotStatus) DeepCopyInto(out *BackupSnapshotStatus) { +func (in *BackupSchedule) DeepCopyInto(out *BackupSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSchedule. +func (in *BackupSchedule) DeepCopy() *BackupSchedule { + if in == nil { + return nil + } + out := new(BackupSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupScheduleList) DeepCopyInto(out *BackupScheduleList) { *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSnapshotStatus. -func (in *BackupSnapshotStatus) DeepCopy() *BackupSnapshotStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleList. +func (in *BackupScheduleList) DeepCopy() *BackupScheduleList { if in == nil { return nil } - out := new(BackupSnapshotStatus) + out := new(BackupScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupScheduleSpec) DeepCopyInto(out *BackupScheduleSpec) { + *out = *in + if in.StartingDeadlineMinutes != nil { + in, out := &in.StartingDeadlineMinutes, &out.StartingDeadlineMinutes + *out = new(int64) + **out = **in + } + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]SchedulePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleSpec. +func (in *BackupScheduleSpec) DeepCopy() *BackupScheduleSpec { + if in == nil { + return nil + } + out := new(BackupScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupScheduleStatus) DeepCopyInto(out *BackupScheduleStatus) { + *out = *in + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make(map[string]ScheduleStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleStatus. +func (in *BackupScheduleStatus) DeepCopy() *BackupScheduleStatus { + if in == nil { + return nil + } + out := new(BackupScheduleStatus) in.DeepCopyInto(out) return out } @@ -431,600 +742,865 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { } if in.Duration != nil { in, out := &in.Duration, &out.Duration - *out = new(v1.Duration) + *out = new(metav1.Duration) **out = **in } - if in.AvailableReplicas != nil { - in, out := &in.AvailableReplicas, &out.AvailableReplicas - *out = new(int32) + if in.TimeRange != nil { + in, out := &in.TimeRange, &out.TimeRange + *out = new(BackupTimeRange) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(BackupTarget) + (*in).DeepCopyInto(*out) + } + if in.BackupMethod != nil { + in, out := &in.BackupMethod, &out.BackupMethod + *out = new(BackupMethod) + (*in).DeepCopyInto(*out) + } + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]ActionStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeSnapshots != nil { + in, out := &in.VolumeSnapshots, &out.VolumeSnapshots + *out = make([]VolumeSnapshotStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupTarget) DeepCopyInto(out *BackupTarget) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(PodSelector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionCredential != nil { + in, out := &in.ConnectionCredential, &out.ConnectionCredential + *out = new(ConnectionCredential) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(KubeResources) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupTarget. +func (in *BackupTarget) DeepCopy() *BackupTarget { + if in == nil { + return nil + } + out := new(BackupTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupTimeRange) DeepCopyInto(out *BackupTimeRange) { + *out = *in + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = (*in).DeepCopy() + } + if in.End != nil { + in, out := &in.End, &out.End + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupTimeRange. +func (in *BackupTimeRange) DeepCopy() *BackupTimeRange { + if in == nil { + return nil + } + out := new(BackupTimeRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectCredential) DeepCopyInto(out *ConnectCredential) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectCredential. +func (in *ConnectCredential) DeepCopy() *ConnectCredential { + if in == nil { + return nil + } + out := new(ConnectCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionCredential) DeepCopyInto(out *ConnectionCredential) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionCredential. +func (in *ConnectionCredential) DeepCopy() *ConnectionCredential { + if in == nil { + return nil + } + out := new(ConnectionCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + in.Target.DeepCopyInto(&out.Target) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. +func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecActionSpec) DeepCopyInto(out *ExecActionSpec) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Timeout = in.Timeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecActionSpec. +func (in *ExecActionSpec) DeepCopy() *ExecActionSpec { + if in == nil { + return nil + } + out := new(ExecActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecActionTarget) DeepCopyInto(out *ExecActionTarget) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecActionTarget. 
+func (in *ExecActionTarget) DeepCopy() *ExecActionTarget { + if in == nil { + return nil + } + out := new(ExecActionTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludeResource) DeepCopyInto(out *IncludeResource) { + *out = *in + in.LabelSelector.DeepCopyInto(&out.LabelSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeResource. +func (in *IncludeResource) DeepCopy() *IncludeResource { + if in == nil { + return nil + } + out := new(IncludeResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobAction) DeepCopyInto(out *JobAction) { + *out = *in + in.Target.DeepCopyInto(&out.Target) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobAction. +func (in *JobAction) DeepCopy() *JobAction { + if in == nil { + return nil + } + out := new(JobAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobActionSpec) DeepCopyInto(out *JobActionSpec) { + *out = *in + if in.RunOnTargetPodNode != nil { + in, out := &in.RunOnTargetPodNode, &out.RunOnTargetPodNode + *out = new(bool) **out = **in } - if in.Manifests != nil { - in, out := &in.Manifests, &out.Manifests - *out = new(ManifestsStatus) - (*in).DeepCopyInto(*out) + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. -func (in *BackupStatus) DeepCopy() *BackupStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobActionSpec. 
+func (in *JobActionSpec) DeepCopy() *JobActionSpec { if in == nil { return nil } - out := new(BackupStatus) + out := new(JobActionSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupStatusUpdate) DeepCopyInto(out *BackupStatusUpdate) { +func (in *JobActionTarget) DeepCopyInto(out *JobActionTarget) { *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatusUpdate. -func (in *BackupStatusUpdate) DeepCopy() *BackupStatusUpdate { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobActionTarget. +func (in *JobActionTarget) DeepCopy() *JobActionTarget { if in == nil { return nil } - out := new(BackupStatusUpdate) + out := new(JobActionTarget) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupTool) DeepCopyInto(out *BackupTool) { +func (in *KubeResources) DeepCopyInto(out *KubeResources) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Included != nil { + in, out := &in.Included, &out.Included + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Excluded != nil { + in, out := &in.Excluded, &out.Excluded + *out = make([]string, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupTool. 
-func (in *BackupTool) DeepCopy() *BackupTool { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeResources. +func (in *KubeResources) DeepCopy() *KubeResources { if in == nil { return nil } - out := new(BackupTool) + out := new(KubeResources) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BackupTool) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSelector) DeepCopyInto(out *PodSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) } - return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSelector. +func (in *PodSelector) DeepCopy() *PodSelector { + if in == nil { + return nil + } + out := new(PodSelector) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupToolList) DeepCopyInto(out *BackupToolList) { +func (in *PrepareDataConfig) DeepCopyInto(out *PrepareDataConfig) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]BackupTool, len(*in)) + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(VolumeConfig) + **out = **in + } + if in.RestoreVolumeClaims != nil { + in, out := &in.RestoreVolumeClaims, &out.RestoreVolumeClaims + *out = make([]RestoreVolumeClaim, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.RestoreVolumeClaimsTemplate != nil { + in, out := &in.RestoreVolumeClaimsTemplate, &out.RestoreVolumeClaimsTemplate + *out = new(RestoreVolumeClaimsTemplate) + (*in).DeepCopyInto(*out) + } + in.SchedulingSpec.DeepCopyInto(&out.SchedulingSpec) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupToolList. -func (in *BackupToolList) DeepCopy() *BackupToolList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrepareDataConfig. +func (in *PrepareDataConfig) DeepCopy() *PrepareDataConfig { if in == nil { return nil } - out := new(BackupToolList) + out := new(PrepareDataConfig) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BackupToolList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupToolManifestsStatus) DeepCopyInto(out *BackupToolManifestsStatus) { +func (in *ReadinessProbe) DeepCopyInto(out *ReadinessProbe) { *out = *in + in.Exec.DeepCopyInto(&out.Exec) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupToolManifestsStatus. -func (in *BackupToolManifestsStatus) DeepCopy() *BackupToolManifestsStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbe. +func (in *ReadinessProbe) DeepCopy() *ReadinessProbe { if in == nil { return nil } - out := new(BackupToolManifestsStatus) + out := new(ReadinessProbe) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupToolRestoreCommand) DeepCopyInto(out *BackupToolRestoreCommand) { +func (in *ReadinessProbeExecAction) DeepCopyInto(out *ReadinessProbeExecAction) { *out = *in - if in.RestoreCommands != nil { - in, out := &in.RestoreCommands, &out.RestoreCommands - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IncrementalRestoreCommands != nil { - in, out := &in.IncrementalRestoreCommands, &out.IncrementalRestoreCommands + if in.Command != nil { + in, out := &in.Command, &out.Command *out = make([]string, len(*in)) copy(*out, *in) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupToolRestoreCommand. -func (in *BackupToolRestoreCommand) DeepCopy() *BackupToolRestoreCommand { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeExecAction. +func (in *ReadinessProbeExecAction) DeepCopy() *ReadinessProbeExecAction { if in == nil { return nil } - out := new(BackupToolRestoreCommand) + out := new(ReadinessProbeExecAction) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupToolSpec) DeepCopyInto(out *BackupToolSpec) { +func (in *ReadyConfig) DeepCopyInto(out *ReadyConfig) { *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(corev1.ResourceRequirements) + if in.JobAction != nil { + in, out := &in.JobAction, &out.JobAction + *out = new(JobAction) (*in).DeepCopyInto(*out) } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]corev1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.BackupCommands != nil { - in, out := &in.BackupCommands, &out.BackupCommands - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IncrementalBackupCommands != nil { - in, out := &in.IncrementalBackupCommands, &out.IncrementalBackupCommands - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Physical != nil { - in, out := &in.Physical, &out.Physical - *out = new(PhysicalConfig) + if in.ExecAction != nil { + in, out := &in.ExecAction, &out.ExecAction + *out = new(ExecAction) (*in).DeepCopyInto(*out) } - if in.Logical != nil { - in, out := &in.Logical, &out.Logical - *out = new(LogicalConfig) + if in.ConnectCredential != nil { + in, out := &in.ConnectCredential, &out.ConnectCredential + *out = new(ConnectCredential) + **out = **in + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(ReadinessProbe) (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupToolSpec. -func (in *BackupToolSpec) DeepCopy() *BackupToolSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadyConfig. 
+func (in *ReadyConfig) DeepCopy() *ReadyConfig { if in == nil { return nil } - out := new(BackupToolSpec) + out := new(ReadyConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupToolStatus) DeepCopyInto(out *BackupToolStatus) { +func (in *Restore) DeepCopyInto(out *Restore) { *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupToolStatus. -func (in *BackupToolStatus) DeepCopy() *BackupToolStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore. +func (in *Restore) DeepCopy() *Restore { if in == nil { return nil } - out := new(BackupToolStatus) + out := new(Restore) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Restore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BasePolicy) DeepCopyInto(out *BasePolicy) { +func (in *RestoreActionSpec) DeepCopyInto(out *RestoreActionSpec) { *out = *in - in.Target.DeepCopyInto(&out.Target) - if in.BackupStatusUpdates != nil { - in, out := &in.BackupStatusUpdates, &out.BackupStatusUpdates - *out = make([]BackupStatusUpdate, len(*in)) - copy(*out, *in) + if in.PrepareData != nil { + in, out := &in.PrepareData, &out.PrepareData + *out = new(JobActionSpec) + (*in).DeepCopyInto(*out) + } + if in.PostReady != nil { + in, out := &in.PostReady, &out.PostReady + *out = make([]ActionSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasePolicy. -func (in *BasePolicy) DeepCopy() *BasePolicy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreActionSpec. +func (in *RestoreActionSpec) DeepCopy() *RestoreActionSpec { if in == nil { return nil } - out := new(BasePolicy) + out := new(RestoreActionSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CommonBackupPolicy) DeepCopyInto(out *CommonBackupPolicy) { +func (in *RestoreKubeResources) DeepCopyInto(out *RestoreKubeResources) { *out = *in - in.BasePolicy.DeepCopyInto(&out.BasePolicy) - in.PersistentVolumeClaim.DeepCopyInto(&out.PersistentVolumeClaim) - if in.BackupRepoName != nil { - in, out := &in.BackupRepoName, &out.BackupRepoName - *out = new(string) - **out = **in + if in.IncludeResources != nil { + in, out := &in.IncludeResources, &out.IncludeResources + *out = make([]IncludeResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonBackupPolicy. 
-func (in *CommonBackupPolicy) DeepCopy() *CommonBackupPolicy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreKubeResources. +func (in *RestoreKubeResources) DeepCopy() *RestoreKubeResources { if in == nil { return nil } - out := new(CommonBackupPolicy) + out := new(RestoreKubeResources) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogicalConfig) DeepCopyInto(out *LogicalConfig) { +func (in *RestoreList) DeepCopyInto(out *RestoreList) { *out = *in - in.BackupToolRestoreCommand.DeepCopyInto(&out.BackupToolRestoreCommand) + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Restore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogicalConfig. -func (in *LogicalConfig) DeepCopy() *LogicalConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList. +func (in *RestoreList) DeepCopy() *RestoreList { if in == nil { return nil } - out := new(LogicalConfig) + out := new(RestoreList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ManifestsStatus) DeepCopyInto(out *ManifestsStatus) { +func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { *out = *in - if in.BackupLog != nil { - in, out := &in.BackupLog, &out.BackupLog - *out = new(BackupLogStatus) + out.Backup = in.Backup + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(RestoreKubeResources) (*in).DeepCopyInto(*out) } - if in.Snapshot != nil { - in, out := &in.Snapshot, &out.Snapshot - *out = new(BackupSnapshotStatus) - **out = **in + if in.PrepareDataConfig != nil { + in, out := &in.PrepareDataConfig, &out.PrepareDataConfig + *out = new(PrepareDataConfig) + (*in).DeepCopyInto(*out) } - if in.BackupTool != nil { - in, out := &in.BackupTool, &out.BackupTool - *out = new(BackupToolManifestsStatus) - **out = **in + if in.ReadyConfig != nil { + in, out := &in.ReadyConfig, &out.ReadyConfig + *out = new(ReadyConfig) + (*in).DeepCopyInto(*out) } - if in.UserContext != nil { - in, out := &in.UserContext, &out.UserContext - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.ContainerResources.DeepCopyInto(&out.ContainerResources) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestsStatus. -func (in *ManifestsStatus) DeepCopy() *ManifestsStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. +func (in *RestoreSpec) DeepCopy() *RestoreSpec { if in == nil { return nil } - out := new(ManifestsStatus) + out := new(RestoreSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { +func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { *out = *in - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() } - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() } - out.InitCapacity = in.InitCapacity.DeepCopy() - if in.PersistentVolumeConfigMap != nil { - in, out := &in.PersistentVolumeConfigMap, &out.PersistentVolumeConfigMap - *out = new(PersistentVolumeConfigMap) + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) **out = **in } + in.Actions.DeepCopyInto(&out.Actions) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. -func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. +func (in *RestoreStatus) DeepCopy() *RestoreStatus { if in == nil { return nil } - out := new(PersistentVolumeClaim) + out := new(RestoreStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeConfigMap) DeepCopyInto(out *PersistentVolumeConfigMap) { +func (in *RestoreStatusAction) DeepCopyInto(out *RestoreStatusAction) { *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + in.EndTime.DeepCopyInto(&out.EndTime) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfigMap. -func (in *PersistentVolumeConfigMap) DeepCopy() *PersistentVolumeConfigMap { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatusAction. +func (in *RestoreStatusAction) DeepCopy() *RestoreStatusAction { if in == nil { return nil } - out := new(PersistentVolumeConfigMap) + out := new(RestoreStatusAction) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PhysicalConfig) DeepCopyInto(out *PhysicalConfig) { +func (in *RestoreStatusActions) DeepCopyInto(out *RestoreStatusActions) { *out = *in - in.BackupToolRestoreCommand.DeepCopyInto(&out.BackupToolRestoreCommand) + if in.PrepareData != nil { + in, out := &in.PrepareData, &out.PrepareData + *out = make([]RestoreStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostReady != nil { + in, out := &in.PostReady, &out.PostReady + *out = make([]RestoreStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhysicalConfig. -func (in *PhysicalConfig) DeepCopy() *PhysicalConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatusActions. 
+func (in *RestoreStatusActions) DeepCopy() *RestoreStatusActions { if in == nil { return nil } - out := new(PhysicalConfig) + out := new(RestoreStatusActions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RestoreJob) DeepCopyInto(out *RestoreJob) { +func (in *RestoreVolumeClaim) DeepCopyInto(out *RestoreVolumeClaim) { *out = *in - out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.VolumeClaimSpec.DeepCopyInto(&out.VolumeClaimSpec) + out.VolumeConfig = in.VolumeConfig } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreJob. -func (in *RestoreJob) DeepCopy() *RestoreJob { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreVolumeClaim. +func (in *RestoreVolumeClaim) DeepCopy() *RestoreVolumeClaim { if in == nil { return nil } - out := new(RestoreJob) + out := new(RestoreVolumeClaim) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RestoreJob) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RestoreJobList) DeepCopyInto(out *RestoreJobList) { +func (in *RestoreVolumeClaimsTemplate) DeepCopyInto(out *RestoreVolumeClaimsTemplate) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RestoreJob, len(*in)) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]RestoreVolumeClaim, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreJobList. -func (in *RestoreJobList) DeepCopy() *RestoreJobList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreVolumeClaimsTemplate. +func (in *RestoreVolumeClaimsTemplate) DeepCopy() *RestoreVolumeClaimsTemplate { if in == nil { return nil } - out := new(RestoreJobList) + out := new(RestoreVolumeClaimsTemplate) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RestoreJobList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeSettings) DeepCopyInto(out *RuntimeSettings) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeSettings. +func (in *RuntimeSettings) DeepCopy() *RuntimeSettings { + if in == nil { + return nil } - return nil + out := new(RuntimeSettings) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RestoreJobSpec) DeepCopyInto(out *RestoreJobSpec) { +func (in *SchedulePolicy) DeepCopyInto(out *SchedulePolicy) { *out = *in - in.Target.DeepCopyInto(&out.Target) - if in.TargetVolumes != nil { - in, out := &in.TargetVolumes, &out.TargetVolumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TargetVolumeMounts != nil { - in, out := &in.TargetVolumeMounts, &out.TargetVolumeMounts - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreJobSpec. -func (in *RestoreJobSpec) DeepCopy() *RestoreJobSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicy. +func (in *SchedulePolicy) DeepCopy() *SchedulePolicy { if in == nil { return nil } - out := new(RestoreJobSpec) + out := new(SchedulePolicy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RestoreJobStatus) DeepCopyInto(out *RestoreJobStatus) { +func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { *out = *in - if in.Expiration != nil { - in, out := &in.Expiration, &out.Expiration - *out = (*in).DeepCopy() - } - if in.StartTimestamp != nil { - in, out := &in.StartTimestamp, &out.StartTimestamp + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime *out = (*in).DeepCopy() } - if in.CompletionTimestamp != nil { - in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + if in.LastSuccessfulTime != nil { + in, out := &in.LastSuccessfulTime, &out.LastSuccessfulTime *out = (*in).DeepCopy() } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreJobStatus. -func (in *RestoreJobStatus) DeepCopy() *RestoreJobStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. +func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { if in == nil { return nil } - out := new(RestoreJobStatus) + out := new(ScheduleStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RetentionSpec) DeepCopyInto(out *RetentionSpec) { +func (in *SchedulingSpec) DeepCopyInto(out *SchedulingSpec) { *out = *in - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(string) - **out = **in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionSpec. -func (in *RetentionSpec) DeepCopy() *RetentionSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingSpec. +func (in *SchedulingSpec) DeepCopy() *SchedulingSpec { if in == nil { return nil } - out := new(RetentionSpec) + out := new(SchedulingSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Schedule) DeepCopyInto(out *Schedule) { +func (in *SyncProgress) DeepCopyInto(out *SyncProgress) { *out = *in - if in.StartingDeadlineMinutes != nil { - in, out := &in.StartingDeadlineMinutes, &out.StartingDeadlineMinutes - *out = new(int64) - **out = **in - } - if in.Snapshot != nil { - in, out := &in.Snapshot, &out.Snapshot - *out = new(SchedulePolicy) + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) **out = **in } - if in.Datafile != nil { - in, out := &in.Datafile, &out.Datafile - *out = new(SchedulePolicy) - **out = **in - } - if in.Logfile != nil { - in, out := &in.Logfile, &out.Logfile - *out = new(SchedulePolicy) + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int32) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. -func (in *Schedule) DeepCopy() *Schedule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncProgress. +func (in *SyncProgress) DeepCopy() *SyncProgress { if in == nil { return nil } - out := new(Schedule) + out := new(SyncProgress) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchedulePolicy) DeepCopyInto(out *SchedulePolicy) { +func (in *TargetVolumeInfo) DeepCopyInto(out *TargetVolumeInfo) { *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePolicy. 
-func (in *SchedulePolicy) DeepCopy() *SchedulePolicy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVolumeInfo. +func (in *TargetVolumeInfo) DeepCopy() *TargetVolumeInfo { if in == nil { return nil } - out := new(SchedulePolicy) + out := new(TargetVolumeInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapshotPolicy) DeepCopyInto(out *SnapshotPolicy) { +func (in *VolumeConfig) DeepCopyInto(out *VolumeConfig) { *out = *in - in.BasePolicy.DeepCopyInto(&out.BasePolicy) - if in.Hooks != nil { - in, out := &in.Hooks, &out.Hooks - *out = new(BackupPolicyHook) - (*in).DeepCopyInto(*out) - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicy. -func (in *SnapshotPolicy) DeepCopy() *SnapshotPolicy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeConfig. +func (in *VolumeConfig) DeepCopy() *VolumeConfig { if in == nil { return nil } - out := new(SnapshotPolicy) + out := new(VolumeConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TargetCluster) DeepCopyInto(out *TargetCluster) { +func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { *out = *in - if in.LabelsSelector != nil { - in, out := &in.LabelsSelector, &out.LabelsSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(BackupPolicySecret) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetCluster. -func (in *TargetCluster) DeepCopy() *TargetCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotStatus. 
+func (in *VolumeSnapshotStatus) DeepCopy() *VolumeSnapshotStatus { if in == nil { return nil } - out := new(TargetCluster) + out := new(VolumeSnapshotStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/dataprotection/main.go b/cmd/dataprotection/main.go index efdfb091f2d..67f7fffbca3 100644 --- a/cmd/dataprotection/main.go +++ b/cmd/dataprotection/main.go @@ -197,48 +197,49 @@ func main() { os.Exit(1) } - if err = (&dpcontrollers.BackupToolReconciler{ + if err = (&dpcontrollers.ActionSetReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-tool-controller"), + Recorder: mgr.GetEventRecorderFor("actionset-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BackupTool") + setupLog.Error(err, "unable to create controller", "controller", "ActionSet") os.Exit(1) } - if err = (&dpcontrollers.BackupPolicyReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-policy-controller"), + if err = (&dpcontrollers.BackupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("backup-controller"), + RestConfig: mgr.GetConfig(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BackupPolicy") + setupLog.Error(err, "unable to create controller", "controller", "Backup") os.Exit(1) } - if err = (&dpcontrollers.CronJobReconciler{ + if err = (&dpcontrollers.RestoreReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cronjob-controller"), + Recorder: mgr.GetEventRecorderFor("restore-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "CronJob") + setupLog.Error(err, "unable to create controller", "controller", "Restore") os.Exit(1) } - if err = (&dpcontrollers.BackupReconciler{ + if err = 
(&dpcontrollers.BackupPolicyReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-controller"), + Recorder: mgr.GetEventRecorderFor("backup-policy-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Backup") + setupLog.Error(err, "unable to create controller", "controller", "BackupPolicy") os.Exit(1) } - if err = (&dpcontrollers.RestoreJobReconciler{ + if err = (&dpcontrollers.BackupScheduleReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("restore-job-controller"), + Recorder: mgr.GetEventRecorderFor("backup-schedule-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "RestoreJob") + setupLog.Error(err, "unable to create controller", "controller", "BackupSchedule") os.Exit(1) } diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 9e687ab00e8..76706f68643 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -46,7 +46,7 @@ import ( // +kubebuilder:scaffold:imports appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" @@ -77,7 +77,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1alpha1.AddToScheme(scheme)) - utilruntime.Must(dataprotectionv1alpha1.AddToScheme(scheme)) + utilruntime.Must(dpv1alpha1.AddToScheme(scheme)) utilruntime.Must(snapshotv1.AddToScheme(scheme)) utilruntime.Must(snapshotv1beta1.AddToScheme(scheme)) utilruntime.Must(extensionsv1alpha1.AddToScheme(scheme)) diff 
--git a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml index 5b25e90ea08..2073e6f7e8e 100644 --- a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -53,6 +53,268 @@ spec: the specified componentDefinition. items: properties: + backupMethods: + description: backupMethods defines the backup methods. + items: + description: BackupMethod defines the backup method. + properties: + actionSetName: + description: actionSetName refers to the ActionSet object + that defines the backup actions. For volume snapshot + backup, the actionSet is not required, the controller + will use the CSI volume snapshotter to create the snapshot. + type: string + env: + description: env specifies the environment variables for + the backup workload. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are + expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings + for the backup workload container. + properties: + resources: + description: 'resources specifies the resource required + by container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It + can only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take + snapshots of persistent volumes. if true, the BackupScript + is not required, the controller will use the CSI volume + snapshotter to create the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from + the target should be mounted in the backup workload. + properties: + volumeMounts: + description: volumeMounts specifies the mount for + the volumes specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. 
+ type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes indicates the list of volumes + of targeted application that should be mounted on + the backup job. + items: + type: string + type: array + type: object + required: + - name + type: object + type: array componentDefRef: description: componentDefRef references componentDef defined in ClusterDefinition spec. Need to comply with IANA Service @@ -60,378 +322,86 @@ spec: maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ type: string - datafile: - description: the policy for datafile backup. + retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" + type: string + schedules: + description: schedule policy for backup. + items: + properties: + backupMethod: + description: backupMethod specifies the backup method + name that is defined in backupPolicy. + type: string + cronExpression: + description: the cron expression for schedule, the timezone + is in UTC. see https://en.wikipedia.org/wiki/Cron. 
+ type: string + enabled: + description: enabled specifies whether the backup schedule + is enabled or not. + type: boolean + required: + - backupMethod + - cronExpression + type: object + type: array + target: + description: target instance for backup. properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. The script must exist in - the container of ContainerName and the output format - must be set to JSON. Note that outputting to stderr - may cause the result format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, - only support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + account: + description: refer to spec.componentDef.systemAccounts.accounts[*].name + in ClusterDefinition. the secret created by this account + will be used to connect the database. if not set, the + secret created by spec.ConnectionCredential of the ClusterDefinition + will be used. it will be transformed to a secret for BackupPolicy's + target secret. 
type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 0 means NO limit on - the number of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. + connectionCredentialKey: + description: connectionCredentialKey defines connection + credential key in secret which created by spec.ConnectionCredential + of the ClusterDefinition. it will be ignored when "account" + is set. properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. + hostKey: + description: hostKey specifies the map key of the host + in the connection credential secret. type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. - properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. 
such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' + passwordKey: + description: the key of password in the ConnectionCredential + secret. if not set, the default key is "password". type: string - type: object - type: object - logfile: - description: the policy for logfile backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. The script must exist in - the container of ContainerName and the output format - must be set to JSON. Note that outputting to stderr - may cause the result format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, - only support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 
0 means NO limit on - the number of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. - properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. + portKey: + default: port + description: portKey specifies the map key of the port + in the connection credential secret. type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. - properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' + usernameKey: + description: the key of username in the ConnectionCredential + secret. if not set, the default key is "username". 
type: string type: object - type: object - retention: - description: retention describe how long the Backup should be - retained. if not set, will be retained forever. - properties: - ttl: - description: ttl is a time string ending with the 'd'|'D'|'h'|'H' - character to describe how long the Backup should be retained. - if not set, will be retained forever. - pattern: ^\d+[d|D|h|H]$ + role: + description: 'select instance of corresponding role for + backup, role are: - the name of Leader/Follower/Leaner + for Consensus component. - primary or secondary for Replication + component. finally, invalid role of the component will + be ignored. such as if workload type is Replication and + component''s replicas is 1, the secondary role is invalid. + and it also will be ignored when component is Stateful/Stateless. + the role will be transformed to a role LabelSelector for + BackupPolicy''s target attribute.' type: string type: object - schedule: - description: schedule policy for backup. - properties: - datafile: - description: schedule policy for datafile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. - type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - logfile: - description: schedule policy for logfile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. - type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - snapshot: - description: schedule policy for snapshot backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. 
- type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - startingDeadlineMinutes: - description: startingDeadlineMinutes defines the deadline - in minutes for starting the backup job if it misses scheduled - time for any reason. - format: int64 - maximum: 1440 - minimum: 0 - type: integer - type: object - snapshot: - description: the policy for snapshot backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. The script must exist in - the container of ContainerName and the output format - must be set to JSON. Note that outputting to stderr - may cause the result format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 0 means NO limit on - the number of backups. - format: int32 - type: integer - hooks: - description: execute hook commands for backup. 
- properties: - containerName: - description: which container can exec command - type: string - image: - description: exec command with image - type: string - postCommands: - description: post backup to perform commands - items: - type: string - type: array - preCommands: - description: pre backup to perform commands - items: - type: string - type: array - type: object - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. - properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. - type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. - properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' 
- type: string - type: object - type: object required: + - backupMethods - componentDefRef type: object minItems: 1 diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index f06624d15d3..d741c62fef4 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -118,15 +118,7 @@ spec: description: enabled defines whether to enable automated backup. type: boolean method: - allOf: - - enum: - - snapshot - - backupTool - - enum: - - snapshot - - backupTool - default: snapshot - description: 'backup method, support: snapshot, backupTool.' + description: backup method name to use, that is defined in backupPolicy. type: string pitrEnabled: default: false @@ -138,11 +130,14 @@ spec: will use the default backupRepo. type: string retentionPeriod: - default: 1d - description: retentionPeriod is a time string ending with the - 'd'|'D'|'h'|'H' character to describe how long the Backup should - be retained. if not set, will be retained forever. - pattern: ^\d+[d|D|h|H]$ + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" type: string startingDeadlineMinutes: description: startingDeadlineMinutes defines the deadline in minutes @@ -152,8 +147,6 @@ spec: maximum: 1440 minimum: 0 type: integer - required: - - method type: object clusterDefinitionRef: description: Cluster referencing ClusterDefinition name. 
This is an diff --git a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml index 4c5da53a769..16bf1252b31 100644 --- a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml +++ b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml @@ -63,17 +63,15 @@ spec: backupSpec: description: backupSpec defines how to backup the cluster. properties: + backupMethod: + description: Backup method name that is defined in backupPolicy. + type: string backupName: description: backupName is the name of the backup. type: string backupPolicyName: description: Which backupPolicy is applied to perform this backup type: string - backupType: - default: datafile - description: Backup Type. datafile or logfile or snapshot. If - not set, datafile is the default type. - type: string parentBackupName: description: if backupType is incremental, parentBackupName is required. diff --git a/config/crd/bases/dataprotection.kubeblocks.io_actionsets.yaml b/config/crd/bases/dataprotection.kubeblocks.io_actionsets.yaml new file mode 100644 index 00000000000..85c9df43f48 --- /dev/null +++ b/config/crd/bases/dataprotection.kubeblocks.io_actionsets.yaml @@ -0,0 +1,554 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: actionsets.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + kind: ActionSet + listKind: ActionSetList + plural: actionsets + shortNames: + - as + singular: actionset + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupType + name: BACKUP-TYPE + type: string + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ActionSet is the Schema for the actionsets API + 
properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ActionSetSpec defines the desired state of ActionSet + properties: + backup: + description: backup specifies the backup action. + properties: + backupData: + description: backupData specifies the backup data action. + properties: + command: + description: command specifies the commands to back up the + volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if it encounters + an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to run the + job workload on the target pod node. If backup container + should mount the target pod's volume, this field should + be set to true. + type: boolean + syncProgress: + description: syncProgress specifies whether to sync the backup + progress and its interval seconds. + properties: + enabled: + description: enabled specifies whether to sync the backup + progress. If enabled, a sidecar container will be created + to sync the backup progress to the Backup CR status. 
+ type: boolean + intervalSeconds: + default: 60 + description: intervalSeconds specifies the interval seconds + to sync the backup progress. + format: int32 + type: integer + type: object + required: + - command + - image + type: object + postBackup: + description: postBackup specifies a hook that should be executed + after the backup. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. If backup + container should mount the target pod's volume, this + field should be set to true. 
+ type: boolean + required: + - command + - image + type: object + type: object + type: array + preBackup: + description: preBackup specifies a hook that should be executed + before the backup. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. If backup + container should mount the target pod's volume, this + field should be set to true. 
+ type: boolean + required: + - command + - image + type: object + type: object + type: array + type: object + backupType: + allOf: + - enum: + - Full + - Incremental + - Differential + - Continuous + - enum: + - Full + - Incremental + - Differential + - Continuous + default: Full + description: 'backupType specifies the backup type, supported values: + Full, Continuous. Full means full backup. Incremental means back + up data that have changed since the last backup (full or incremental). + Differential means back up data that have changed since the last + full backup. Continuous will back up the transaction log continuously, + the PITR (Point in Time Recovery). can be performed based on the + continuous backup and full backup.' + type: string + env: + description: List of environment variables to set in the container. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + envFrom: + description: List of sources to populate environment variables in + the container. The keys defined within a source must be a C_IDENTIFIER. + All invalid keys will be reported as an event when the container + is starting. When a key exists in multiple sources, the value associated + with the last source will take precedence. Values defined by an + Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + restore: + description: restore specifies the restore action. + properties: + postReady: + description: postReady specifies the action to execute after the + data is ready. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. 
If backup + container should mount the target pod's volume, this + field should be set to true. + type: boolean + required: + - command + - image + type: object + type: object + type: array + prepareData: + description: prepareData specifies the action to prepare data. + properties: + command: + description: command specifies the commands to back up the + volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if it encounters + an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to run the + job workload on the target pod node. If backup container + should mount the target pod's volume, this field should + be set to true. + type: boolean + required: + - command + - image + type: object + type: object + required: + - backupType + type: object + status: + description: ActionSetStatus defines the observed state of ActionSet + properties: + message: + description: A human-readable message indicating details about why + the ActionSet is in this phase. 
+ type: string + observedGeneration: + description: generation number + format: int64 + type: integer + phase: + description: phase - in list of [Available,Unavailable] + enum: + - Available + - Unavailable + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml index 660c40806d3..ec61183f863 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -20,20 +20,19 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: + - jsonPath: .spec.backupRepoName + name: BACKUP-REPO + type: string - jsonPath: .status.phase name: STATUS type: string - - jsonPath: .status.lastScheduleTime - name: LAST SCHEDULE - type: string - jsonPath: .metadata.creationTimestamp name: AGE type: date name: v1alpha1 schema: openAPIV3Schema: - description: BackupPolicy is the Schema for the backuppolicies API (defined - by User) + description: BackupPolicy is the Schema for the backuppolicies API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -50,542 +49,389 @@ spec: spec: description: BackupPolicySpec defines the desired state of BackupPolicy properties: - datafile: - description: the policy for datafile backup. - properties: - backupRepoName: - description: refer to BackupRepo and the backup data will be stored - in the corresponding repo. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. 
example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, only - support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - persistentVolumeClaim: - description: refer to PersistentVolumeClaim and the backup data - will be stored in the corresponding persistent volume. - properties: - createPolicy: - default: IfNotPresent - description: 'createPolicy defines the policy for creating - the PersistentVolumeClaim, enum values: - Never: do nothing - if the PersistentVolumeClaim not exists. - IfNotPresent: - create the PersistentVolumeClaim if not present and the - accessModes only contains ''ReadWriteMany''.' 
- enum: - - IfNotPresent - - Never - type: string - initCapacity: - anyOf: - - type: integer - - type: string - description: initCapacity represents the init storage size - of the PersistentVolumeClaim which should be created if - not exist. and the default value is 100Gi if it is empty. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - name: - description: the name of PersistentVolumeClaim to store backup - data. - type: string - persistentVolumeConfigMap: - description: 'persistentVolumeConfigMap references the configmap - which contains a persistentVolume template. key must be - "persistentVolume" and value is the "PersistentVolume" struct. - support the following built-in Objects: - $(GENERATE_NAME): - generate a specific format "`PVC NAME`-`PVC NAMESPACE`". - if the PersistentVolumeClaim not exists and CreatePolicy - is "IfNotPresent", the controller will create it by this - template. this is a mutually exclusive setting with "storageClassName".' + backoffLimit: + description: Specifies the number of retries before marking the backup + failed. + format: int32 + maximum: 10 + minimum: 0 + type: integer + backupMethods: + description: backupMethods defines the backup methods. + items: + description: BackupMethod defines the backup method. + properties: + actionSetName: + description: actionSetName refers to the ActionSet object that + defines the backup actions. For volume snapshot backup, the + actionSet is not required, the controller will use the CSI + volume snapshotter to create the snapshot. + type: string + env: + description: env specifies the environment variables for the + backup workload. + items: + description: EnvVar represents an environment variable present + in a Container. properties: name: - description: the name of the persistentVolume ConfigMap. 
- pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + description: Name of the environment variable. Must be + a C_IDENTIFIER. type: string - namespace: - description: the namespace of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' type: string - required: - - name - - namespace - type: object - storageClassName: - description: storageClassName is the name of the StorageClass - required by the claim. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - type: object - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
- If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. 
- properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string required: - name type: object - required: - - labelsSelector - type: object - required: - - target - type: object - logfile: - description: the policy for logfile backup. - properties: - backupRepoName: - description: refer to BackupRepo and the backup data will be stored - in the corresponding repo. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupStatusUpdates: - description: define how to update metadata for backup status. - items: + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings for + the backup workload container. properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. 
if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, only - support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - persistentVolumeClaim: - description: refer to PersistentVolumeClaim and the backup data - will be stored in the corresponding persistent volume. - properties: - createPolicy: - default: IfNotPresent - description: 'createPolicy defines the policy for creating - the PersistentVolumeClaim, enum values: - Never: do nothing - if the PersistentVolumeClaim not exists. - IfNotPresent: - create the PersistentVolumeClaim if not present and the - accessModes only contains ''ReadWriteMany''.' - enum: - - IfNotPresent - - Never - type: string - initCapacity: - anyOf: - - type: integer - - type: string - description: initCapacity represents the init storage size - of the PersistentVolumeClaim which should be created if - not exist. and the default value is 100Gi if it is empty. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - name: - description: the name of PersistentVolumeClaim to store backup - data. - type: string - persistentVolumeConfigMap: - description: 'persistentVolumeConfigMap references the configmap - which contains a persistentVolume template. key must be - "persistentVolume" and value is the "PersistentVolume" struct. 
- support the following built-in Objects: - $(GENERATE_NAME): - generate a specific format "`PVC NAME`-`PVC NAMESPACE`". - if the PersistentVolumeClaim not exists and CreatePolicy - is "IfNotPresent", the controller will create it by this - template. this is a mutually exclusive setting with "storageClassName".' - properties: - name: - description: the name of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - namespace: - description: the namespace of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ - type: string - required: - - name - - namespace - type: object - storageClassName: - description: storageClassName is the name of the StorageClass - required by the claim. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - type: object - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. 
- items: + resources: + description: 'resources specifies the resource required + by container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where + this field is used. It makes that resource available + inside a container. type: string - type: array - required: - - key - - operator + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. 
+ requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take snapshots + of persistent volumes. if true, the BackupScript is not required, + the controller will use the CSI volume snapshotter to create + the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from the + target should be mounted in the backup workload. + properties: + volumeMounts: + description: volumeMounts specifies the mount for the volumes + specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults to + "" (volume's root). 
+ type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. - properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: array + volumes: + description: Volumes indicates the list of volumes of targeted + application that should be mounted on the backup job. + items: type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string - required: - - name - type: object - required: - - labelsSelector - type: object - required: - - target - type: object - retention: - description: retention describe how long the Backup should be retained. - if not set, will be retained forever. - properties: - ttl: - description: ttl is a time string ending with the 'd'|'D'|'h'|'H' - character to describe how long the Backup should be retained. - if not set, will be retained forever. - pattern: ^\d+[d|D|h|H]$ - type: string - type: object - schedule: - description: schedule policy for backup. 
+ type: array + type: object + required: + - name + type: object + type: array + backupRepoName: + description: backupRepoName is the name of BackupRepo and the backup + data will be stored in this repository. If not set, will be stored + in the default backup repository. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + pathPrefix: + description: pathPrefix is the directory inside the backup repository + to store the backup content. It is a relative to the path of the + backup repository. + type: string + target: + description: target specifies the target information to back up. properties: - datafile: - description: schedule policy for datafile backup. + connectionCredential: + description: connectionCredential specifies the connection credential + to connect to the target database cluster. properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + hostKey: + description: hostKey specifies the map key of the host in + the connection credential secret. type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - logfile: - description: schedule policy for logfile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + passwordKey: + default: password + description: passwordKey specifies the map key of the password + in the connection credential secret. + type: string + portKey: + default: port + description: portKey specifies the map key of the port in + the connection credential secret. + type: string + secretName: + description: secretName refers to the Secret object that contains + the connection credential. + type: string + usernameKey: + default: username + description: usernameKey specifies the map key of the user + in the connection credential secret. 
type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable type: object - snapshot: - description: schedule policy for snapshot backup. + podSelector: + description: podSelector is used to find the target pod. The volumes + of the target pod will be backed up. properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + strategy: + default: Any + description: 'strategy specifies the strategy to select the + target pod when multiple pods are selected. Valid values + are: - All: select all pods that match the labelsSelector. + - Any: select any one pod that match the labelsSelector.' 
+ enum: + - All + - Any type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable type: object - startingDeadlineMinutes: - description: startingDeadlineMinutes defines the deadline in minutes - for starting the backup job if it misses scheduled time for - any reason. - format: int64 - maximum: 1440 - minimum: 0 - type: integer - type: object - snapshot: - description: the policy for snapshot backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - hooks: - description: execute hook commands for backup. 
+ x-kubernetes-map-type: atomic + resources: + description: resources specifies the kubernetes resources to back + up. properties: - containerName: - description: which container can exec command - type: string - image: - description: exec command with image - type: string - postCommands: - description: post backup to perform commands + excluded: + description: excluded is a slice of namespaced-scoped resource + type names to exclude in the kubernetes resources. The default + value is empty. items: type: string type: array - preCommands: - description: pre backup to perform commands + included: + default: + - '*' + description: included is a slice of namespaced-scoped resource + type names to include in the kubernetes resources. The default + value is "*", which means all resource types will be included. items: type: string type: array - type: object - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + selector: + description: selector is a metav1.LabelSelector to filter + the target kubernetes resources that need to be backed up. + If not set, will do not back up any kubernetes resources. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -630,64 +476,34 @@ spec: type: object type: object x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. 
- properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string - required: - - name - type: object - required: - - labelsSelector type: object - required: - - target + serviceAccountName: + description: serviceAccountName specifies the service account + to run the backup workload. + type: string type: object + required: + - backupMethods + - target type: object status: description: BackupPolicyStatus defines the observed state of BackupPolicy properties: - failureReason: - description: the reason if backup policy check failed. - type: string - lastScheduleTime: - description: information when was the last time the job was successfully - scheduled. - format: date-time - type: string - lastSuccessfulTime: - description: information when was the last time the job successfully - completed. - format: date-time + message: + description: A human-readable message indicating details about why + the BackupPolicy is in this phase. type: string observedGeneration: description: observedGeneration is the most recent generation observed - for this BackupPolicy. It corresponds to the Cluster's generation, + for this BackupPolicy. It refers to the BackupPolicy's generation, which is updated on mutation by the API Server. format: int64 type: integer phase: - description: 'backup policy phase valid value: Available, Failed.' 
+ description: phase - in list of [Available,Unavailable] enum: - Available - - Failed + - Unavailable type: string type: object type: object diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml index 27fa8f29e72..0126b52ced5 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml @@ -18,15 +18,18 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .spec.backupType - name: TYPE + - jsonPath: .spec.backupPolicyName + name: POLICY + type: string + - jsonPath: .spec.backupMethod + name: METHOD + type: string + - jsonPath: .status.backupRepoName + name: REPO type: string - jsonPath: .status.phase name: STATUS type: string - - jsonPath: .status.sourceCluster - name: SOURCE-CLUSTER - type: string - jsonPath: .status.totalSize name: TOTAL-SIZE type: string @@ -34,15 +37,18 @@ spec: name: DURATION type: string - jsonPath: .metadata.creationTimestamp - name: CREATE-TIME + name: CREATION-TIME type: string - jsonPath: .status.completionTimestamp name: COMPLETION-TIME type: string + - jsonPath: .status.expiration + name: EXPIRATION-TIME + type: string name: v1alpha1 schema: openAPIV3Schema: - description: Backup is the Schema for the backups API (defined by User). + description: Backup is the Schema for the backups API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -59,135 +65,444 @@ spec: spec: description: BackupSpec defines the desired state of Backup. properties: + backupMethod: + description: backupMethod specifies the backup method name that is + defined in backupPolicy. + type: string backupPolicyName: - description: Which backupPolicy is applied to perform this backup + description: Which backupPolicy is applied to perform this backup. 
pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string - backupType: - default: datafile - description: Backup Type. datafile or logfile or snapshot. If not - set, datafile is the default type. - enum: - - datafile - - logfile - - snapshot + deletionPolicy: + allOf: + - enum: + - Delete + - Retain + - enum: + - Delete + - Retain + default: Delete + description: deletionPolicy determines whether the backup contents + stored in backup repository should be deleted when the backup custom + resource is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the backup content and its physical snapshot + on backup repository are kept. "Delete" means that the backup content + and its physical snapshot on backup repository are deleted. type: string parentBackupName: - description: if backupType is incremental, parentBackupName is required. + description: parentBackupName determines the parent backup name for + incremental or differential backup. + type: string + retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which the + backup should be kept. controller will remove all backups that are + older than the RetentionPeriod. For example, RetentionPeriod of + `30d` will keep only the backups of last 30 days. Sample duration + format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h + - minutes: \t30m You can also combine the above durations. For example: + 30d12h30m" type: string required: + - backupMethod - backupPolicyName - - backupType type: object status: description: BackupStatus defines the observed state of Backup. properties: - availableReplicas: - description: availableReplicas available replicas for statefulSet - which created by backup. - format: int32 - type: integer - backupToolName: - description: backupToolName references the backup tool name. + actions: + description: actions records the actions information for this backup. 
+ items: + properties: + actionType: + description: actionType is the type of the action. + type: string + availableReplicas: + description: availableReplicas available replicas for statefulSet + action. + format: int32 + type: integer + completionTimestamp: + description: completionTimestamp records the time an action + was completed. + format: date-time + type: string + failureReason: + description: failureReason is an error that caused the backup + to fail. + type: string + name: + description: name is the name of the action. + type: string + objectRef: + description: objectRef is the object reference for the action. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + phase: + description: phase is the current state of the action. + type: string + startTimestamp: + description: startTimestamp records the time an action was started. + format: date-time + type: string + timeRange: + description: timeRange records the time range of backed up data, + for PITR, this is the time range of recoverable data. + properties: + end: + description: end records the end time of backup. + format: date-time + type: string + start: + description: start records the start time of backup. + format: date-time + type: string + type: object + totalSize: + description: totalSize is the total size of backed up data size. + A string with capacity units in the format of "1Gi", "1Mi", + "1Ki". + type: string + volumeSnapshots: + description: volumeSnapshots records the volume snapshot status + for the action. + items: + properties: + contentName: + description: contentName is the name of the volume snapshot + content. + type: string + name: + description: name is the name of the volume snapshot. + type: string + size: + description: size is the size of the volume snapshot. + type: string + volumeName: + description: volumeName is the name of the volume. + type: string + type: object + type: array + type: object + type: array + backupMethod: + description: backupMethod records the backup method information for + this backup. Refer to BackupMethod for more details. 
+ properties: + actionSetName: + description: actionSetName refers to the ActionSet object that + defines the backup actions. For volume snapshot backup, the + actionSet is not required, the controller will use the CSI volume + snapshotter to create the snapshot. + type: string + env: + description: env specifies the environment variables for the backup + workload. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings for the + backup workload container. + properties: + resources: + description: 'resources specifies the resource required by + container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take snapshots + of persistent volumes. if true, the BackupScript is not required, + the controller will use the CSI volume snapshotter to create + the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from the target + should be mounted in the backup workload. + properties: + volumeMounts: + description: volumeMounts specifies the mount for the volumes + specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves + similarly to SubPath but environment variable references + $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes indicates the list of volumes of targeted + application that should be mounted on the backup job. + items: + type: string + type: array + type: object + required: + - name + type: object + backupRepoName: + description: backupRepoName is the name of the backup repository. type: string completionTimestamp: - description: Date/time when the backup finished being processed. + description: completionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. The server's + time is used for CompletionTimestamp. format: date-time type: string duration: description: The duration time of backup execution. When converted - to a string, the form is "1h2m0.5s". + to a string, the format is "1h2m0.5s". type: string expiration: - description: The date and time when the Backup is eligible for garbage - collection. 'null' means the Backup is NOT be cleaned except delete + description: expiration is when this backup is eligible for garbage + collection. 'null' means the Backup will NOT be cleaned except delete manual. format: date-time type: string failureReason: - description: The reason for a backup failure. + description: failureReason is an error that caused the backup to fail. type: string - logFilePersistentVolumeClaimName: - description: logFilePersistentVolumeClaimName saves the logfile backup - data. 
+ formatVersion: + description: formatVersion is the backup format version, including + major, minor and patch version. type: string - manifests: - description: manifests determines the backup metadata info. - properties: - backupLog: - description: backupLog records startTime and stopTime of data - logging. - properties: - startTime: - description: startTime records the start time of data logging. - format: date-time - type: string - stopTime: - description: stopTime records the stop time of data logging. - format: date-time - type: string - type: object - backupSnapshot: - description: snapshot records the volume snapshot metadata. - properties: - volumeSnapshotContentName: - description: volumeSnapshotContentName specifies the name - of a pre-existing VolumeSnapshotContent object representing - an existing volume snapshot. This field should be set if - the snapshot already exists and only needs a representation - in Kubernetes. This field is immutable. - type: string - volumeSnapshotName: - description: volumeSnapshotName records the volumeSnapshot - name. - type: string - type: object - backupTool: - description: backupTool records information about backup files - generated by the backup tool. - properties: - checkpoint: - description: backup checkpoint, for incremental backup. - type: string - checksum: - description: checksum of backup file, generated by md5 or - sha1 or sha256. - type: string - filePath: - description: filePath records the file path of backup. - type: string - logFilePath: - description: logFilePath records the log file path of backup. - type: string - uploadTotalSize: - description: Backup upload total size. A string with capacity - units in the form of "1Gi", "1Mi", "1Ki". - type: string - volumeName: - description: volumeName records volume name of backup data - pvc. - type: string - type: object - target: - description: target records the target cluster metadata string, - which is in JSON format. 
- type: string - userContext: - additionalProperties: - type: string - description: userContext stores some loosely structured and extensible - information. - type: object - type: object - parentBackupName: - description: Records parentBackupName if backupType is incremental. + path: + description: path is the directory inside the backup repository where + the backup data is stored. It is an absolute path in the backup + repository. type: string persistentVolumeClaimName: - description: remoteVolume saves the backup data. + description: persistentVolumeClaimName is the name of the persistent + volume claim that is used to store the backup data. type: string phase: - description: BackupPhase The current phase. Valid values are New, - InProgress, Completed, Failed. + description: phase is the current state of the Backup. enum: - New - InProgress @@ -196,18 +511,209 @@ spec: - Failed - Deleting type: string - sourceCluster: - description: sourceCluster records the source cluster information - for this backup. - type: string startTimestamp: - description: Date/time when the backup started being processed. + description: startTimestamp records the time a backup was started. + The server's time is used for StartTimestamp. format: date-time type: string + target: + description: target records the target information for this backup. + properties: + connectionCredential: + description: connectionCredential specifies the connection credential + to connect to the target database cluster. + properties: + hostKey: + description: hostKey specifies the map key of the host in + the connection credential secret. + type: string + passwordKey: + default: password + description: passwordKey specifies the map key of the password + in the connection credential secret. + type: string + portKey: + default: port + description: portKey specifies the map key of the port in + the connection credential secret. 
+ type: string + secretName: + description: secretName refers to the Secret object that contains + the connection credential. + type: string + usernameKey: + default: username + description: usernameKey specifies the map key of the user + in the connection credential secret. + type: string + type: object + podSelector: + description: podSelector is used to find the target pod. The volumes + of the target pod will be backed up. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + strategy: + default: Any + description: 'strategy specifies the strategy to select the + target pod when multiple pods are selected. Valid values + are: - All: select all pods that match the labelsSelector. + - Any: select any one pod that match the labelsSelector.' 
+ enum: + - All + - Any + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: resources specifies the kubernetes resources to back + up. + properties: + excluded: + description: excluded is a slice of namespaced-scoped resource + type names to exclude in the kubernetes resources. The default + value is empty. + items: + type: string + type: array + included: + default: + - '*' + description: included is a slice of namespaced-scoped resource + type names to include in the kubernetes resources. The default + value is "*", which means all resource types will be included. + items: + type: string + type: array + selector: + description: selector is a metav1.LabelSelector to filter + the target kubernetes resources that need to be backed up. + If not set, will do not back up any kubernetes resources. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + serviceAccountName: + description: serviceAccountName specifies the service account + to run the backup workload. + type: string + type: object + timeRange: + description: timeRange records the time range of backed up data, for + PITR, this is the time range of recoverable data. + properties: + end: + description: end records the end time of backup. + format: date-time + type: string + start: + description: start records the start time of backup. + format: date-time + type: string + type: object totalSize: - description: Backup total size. A string with capacity units in the - form of "1Gi", "1Mi", "1Ki". + description: totalSize is the total size of backed up data size. A + string with capacity units in the format of "1Gi", "1Mi", "1Ki". type: string + volumeSnapshots: + description: volumeSnapshots records the volume snapshot status for + the action. + items: + properties: + contentName: + description: contentName is the name of the volume snapshot + content. + type: string + name: + description: name is the name of the volume snapshot. + type: string + size: + description: size is the size of the volume snapshot. + type: string + volumeName: + description: volumeName is the name of the volume. 
+ type: string + type: object + type: array type: object type: object served: true diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backupschedules.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backupschedules.yaml new file mode 100644 index 00000000000..40d07aa5fc7 --- /dev/null +++ b/config/crd/bases/dataprotection.kubeblocks.io_backupschedules.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: backupschedules.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + kind: BackupSchedule + listKind: BackupScheduleList + plural: backupschedules + shortNames: + - bs + singular: backupschedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: BackupSchedule is the Schema for the backupschedules API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupScheduleSpec defines the desired state of BackupSchedule. 
+ properties: + backupPolicyName: + description: Which backupPolicy is applied to perform this backup. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + schedules: + description: schedules defines the list of backup schedules. + items: + properties: + backupMethod: + description: backupMethod specifies the backup method name that + is defined in backupPolicy. + type: string + cronExpression: + description: the cron expression for schedule, the timezone + is in UTC. see https://en.wikipedia.org/wiki/Cron. + type: string + enabled: + description: enabled specifies whether the backup schedule is + enabled or not. + type: boolean + retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" + type: string + required: + - backupMethod + - cronExpression + type: object + minItems: 1 + type: array + startingDeadlineMinutes: + description: startingDeadlineMinutes defines the deadline in minutes + for starting the backup workload if it misses scheduled time for + any reason. + format: int64 + maximum: 1440 + minimum: 0 + type: integer + required: + - backupPolicyName + - schedules + type: object + status: + description: BackupScheduleStatus defines the observed state of BackupSchedule. + properties: + failureReason: + description: failureReason is an error that caused the backup to fail. + type: string + observedGeneration: + description: observedGeneration is the most recent generation observed + for this BackupSchedule. It refers to the BackupSchedule's generation, + which is updated on mutation by the API Server. 
+ format: int64 + type: integer + phase: + description: phase describes the phase of the BackupSchedule. + type: string + schedules: + additionalProperties: + description: ScheduleStatus defines the status of each schedule. + properties: + failureReason: + description: failureReason is an error that caused the backup + to fail. + type: string + lastScheduleTime: + description: lastScheduleTime records the last time the backup + was scheduled. + format: date-time + type: string + lastSuccessfulTime: + description: lastSuccessfulTime records the last time the backup + was successfully completed. + format: date-time + type: string + phase: + description: phase describes the phase of the schedule. + type: string + type: object + description: schedules describes the status of each schedule. + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml deleted file mode 100644 index 5244e2b2203..00000000000 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuptools.yaml +++ /dev/null @@ -1,330 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - labels: - app.kubernetes.io/name: kubeblocks - name: backuptools.dataprotection.kubeblocks.io -spec: - group: dataprotection.kubeblocks.io - names: - categories: - - kubeblocks - kind: BackupTool - listKind: BackupToolList - plural: backuptools - singular: backuptool - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupTool is the Schema for the backuptools API (defined by - provider) - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupToolSpec defines the desired state of BackupTool - properties: - backupCommands: - description: Array of command that apps can do database backup. from - invoke args the order of commands follows the order of array. - items: - type: string - type: array - deployKind: - default: job - description: 'which kind for run a backup tool, supported values: - job, statefulSet.' - enum: - - job - - statefulSet - type: string - env: - description: List of environment variables to set in the container. - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. 
- properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-preserve-unknown-fields: true - envFrom: - description: List of sources to populate environment variables in - the container. The keys defined within a source must be a C_IDENTIFIER. - All invalid keys will be reported as an event when the container - is starting. When a key exists in multiple sources, the value associated - with the last source will take precedence. Values defined by an - Env with a duplicate key will take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-preserve-unknown-fields: true - image: - description: Backup tool Container image name. - type: string - incrementalBackupCommands: - description: Array of command that apps can do database incremental - backup. like xtrabackup, that can performs an incremental backup - file. - items: - type: string - type: array - logical: - description: backup tool can support logical restore, in this case, - restore NOT RESTART database. - properties: - incrementalRestoreCommands: - description: Array of incremental restore commands. - items: - type: string - type: array - podScope: - default: All - description: 'podScope defines the pod scope for restore from - backup, supported values: - ''All'' will exec the restore command - on all pods. - ''ReadWrite'' will pick a ReadWrite pod to exec - the restore command.' - enum: - - All - - ReadWrite - type: string - restoreCommands: - description: Array of command that apps can perform database restore. - like xtrabackup, that can performs restore mysql from files. 
- items: - type: string - type: array - type: object - physical: - description: backup tool can support physical restore, in this case, - restore must be RESTART database. - properties: - incrementalRestoreCommands: - description: Array of incremental restore commands. - items: - type: string - type: array - relyOnLogfile: - description: relyOnLogfile defines whether the current recovery - relies on log files - type: boolean - restoreCommands: - description: Array of command that apps can perform database restore. - like xtrabackup, that can performs restore mysql from files. - items: - type: string - type: array - type: object - resources: - description: Compute Resources required by this container. Cannot - be updated. - properties: - claims: - description: "Claims lists the names of resources, defined in - spec.resourceClaims, that are used by this container. \n This - is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be set - for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims - of the Pod where this field is used. It makes that resource - available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: - default: file - description: the type of backup tool, file or pitr - enum: - - file - - pitr - type: string - required: - - backupCommands - - image - type: object - status: - description: BackupToolStatus defines the observed state of BackupTool - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml b/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml deleted file mode 100644 index 12cdf01405b..00000000000 --- a/config/crd/bases/dataprotection.kubeblocks.io_restorejobs.yaml +++ /dev/null @@ -1,1795 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - labels: - app.kubernetes.io/name: kubeblocks - name: restorejobs.dataprotection.kubeblocks.io -spec: - group: dataprotection.kubeblocks.io - names: - categories: - - kubeblocks - kind: RestoreJob - listKind: RestoreJobList - plural: restorejobs - singular: restorejob - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: STATUS - type: string - - jsonPath: .status.completionTimestamp 
- name: COMPLETION-TIME - type: date - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: RestoreJob is the Schema for the restorejobs API (defined by - User) - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: RestoreJobSpec defines the desired state of RestoreJob - properties: - backupJobName: - description: Specified one backupJob to restore. - type: string - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: the target database workload to restore - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. Pods - that match this label selector are counted to determine the - number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. 
Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup policy - template. if still not set, the controller will check if any - system account for dataprotection has been created. - properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in the - connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the connection - credential secret - type: string - required: - - name - type: object - required: - - labelsSelector - type: object - targetVolumeMounts: - description: array of restore volume mounts . - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. 
- type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When - not set, MountPropagationNone is used. This field is beta - in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the - container's volume should be mounted. Behaves similarly to - SubPath but environment variable references $(VAR_NAME) are - expanded using the container's environment. Defaults to "" - (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - minItems: 1 - type: array - x-kubernetes-preserve-unknown-fields: true - targetVolumes: - description: array of restore volumes . - items: - description: Volume represents a named volume in a pod that may - be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'awsElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'readOnly value true will force the readOnly - setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'volumeID is unique ID of the persistent disk - resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: None, - Read Only, Read Write.' - type: string - diskName: - description: diskName is the Name of the data disk in the - blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in the blob - storage - type: string - fsType: - description: fsType is Filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single blob - disk per storage account Managed: azure managed data - disk (only in managed availability set). defaults to shared' - type: string - readOnly: - description: readOnly Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: secretName is the name of secret that contains - Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'monitors is Required: Monitors is a collection - of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: - type: string - type: array - path: - description: 'path is Optional: Used as the mounted root, - rather than the full Ceph tree, default is /' - type: string - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'secretFile is Optional: SecretFile is the - path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'secretRef is Optional: SecretRef is reference - to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: 'user is optional: User is the rados user name, - default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'cinder represents a cinder volume attached and - mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to - be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'secretRef is optional: points to a secret - object containing parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: 'volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'defaultMode is optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. 
YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: items if unspecified, each key-value pair in - the Data field of the referenced ConfigMap will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the ConfigMap, the volume setup will error unless it is - marked optional. Paths must be relative and may not contain - the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: optional specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). - properties: - driver: - description: driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated - CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: nodePublishSecretRef is a reference to the - secret object containing sensitive information to pass - to the CSI driver to complete the CSI NodePublishVolume - and NodeUnpublishVolume calls. This field is optional, - and may be empty if no secret is required. If the secret - object contains more than one secret, all secret references - are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: readOnly specifies a read-only configuration - for the volume. Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: volumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. 
- type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a Optional: mode bits used to set - permissions on created files by default. Must be an octal - value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are - supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: 'Optional: mode bits used to set permissions - on this file, must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' 
- format: int32 - type: integer - path: - description: 'Required: Path is the relative path - name of the file to be created. Must not be absolute - or contain the ''..'' path. Must be utf-8 encoded. - The first item of the relative path must not start - with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - description: 'emptyDir represents a temporary directory that - shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'medium represents what type of storage medium - should back this directory. The default is "" which means - to use the node''s default medium. Must be an empty string - (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: 'sizeLimit is the total amount of local storage - required for this EmptyDir volume. The size limit is also - applicable for memory medium. The maximum usage on memory - medium EmptyDir would be the minimum value between the - SizeLimit specified here and the sum of memory limits - of all containers in a pod. 
The default is nil which means - that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: "ephemeral represents a volume that is handled - by a cluster storage driver. The volume's lifecycle is tied - to the pod that defines it - it will be created before the - pod starts, and deleted when the pod is removed. \n Use this - if: a) the volume is only needed while the pod runs, b) features - of normal volumes like restoring from snapshot or capacity - tracking are needed, c) the storage driver is specified through - a storage class, and d) the storage driver supports dynamic - volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource - for more information on the connection between this volume - type and PersistentVolumeClaim). \n Use PersistentVolumeClaim - or one of the vendor-specific APIs for volumes that persist - for longer than the lifecycle of an individual pod. \n Use - CSI for light-weight local ephemeral volumes if the CSI driver - is meant to be used that way - see the documentation of the - driver for more information. \n A pod can use both types of - ephemeral volumes and persistent volumes at the same time." - properties: - volumeClaimTemplate: - description: "Will be used to create a stand-alone PVC to - provision the volume. The pod in which this EphemeralVolumeSource - is embedded will be the owner of the PVC, i.e. the PVC - will be deleted together with the pod. The name of the - PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. - Pod validation will reject the pod if the concatenated - name is not valid for a PVC (for example, too long). 
\n - An existing PVC with that name that is not owned by the - pod will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC - is meant to be used by the pod, the PVC has to updated - with an owner reference to the pod once the pod exists. - Normally this should not be necessary, but it may be useful - when manually reconstructing a broken cluster. \n This - field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. \n Required, must - not be nil." - properties: - metadata: - description: May contain labels and annotations that - will be copied into the PVC when creating it. No other - fields are allowed and will be rejected during validation. - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: The specification for the PersistentVolumeClaim. - The entire content is copied unchanged into the PVC - that gets created from this template. The same fields - as in a PersistentVolumeClaim are also valid here. - properties: - accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified - data source. 
When the AnyVolumeDataSource feature - gate is enabled, dataSource contents will be copied - to dataSourceRef, and dataSourceRef contents will - be copied to dataSource when dataSourceRef.namespace - is not specified. If the namespace is specified, - then dataSourceRef will not be copied to dataSource.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, if - a non-empty volume is desired. This may be any - object from a non-empty API group (non core object) - or a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. - This field will replace the functionality of the - dataSource field and as such if both fields are - non-empty, they must have the same value. For - backwards compatibility, when namespace isn''t - specified in dataSourceRef, both fields (dataSource - and dataSourceRef) will be set to the same value - automatically if one of them is empty and the - other is non-empty. When namespace is specified - in dataSourceRef, dataSource isn''t set to the - same value and must be empty. There are three - important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types - of objects, dataSourceRef allows any non-core - object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping - them), dataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - * While dataSource only allows local objects, - dataSourceRef allows objects in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled. (Alpha) Using the - namespace field of dataSourceRef requires the - CrossNamespaceVolumeDataSource feature gate to - be enabled.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - namespace: - description: Namespace is the namespace of resource - being referenced Note that when a namespace - is specified, a gateway.networking.k8s.io/ReferenceGrant - object is required in the referent namespace - to allow that namespace's owner to accept - the reference. See the ReferenceGrant documentation - for details. (Alpha) This field requires the - CrossNamespaceVolumeDataSource feature gate - to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity recorded - in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. 
\n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." - items: - description: ResourceClaim references one - entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: selector is a label query over volumes - to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. 
- items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource that is - attached to a kubelet's host machine and then exposed to the - pod. - properties: - fsType: - description: 'fsType is the filesystem type to mount. 
Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. TODO: how do we prevent errors in the - filesystem from compromising the machine' - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide - names (WWNs)' - items: - type: string - type: array - wwids: - description: 'wwids Optional: FC volume world wide identifiers - (wwids) Either wwids or combination of targetWWNs and - lun must be set, but not both simultaneously.' - items: - type: string - type: array - type: object - flexVolume: - description: flexVolume represents a generic volume resource - that is provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to use for - this volume. - type: string - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends - on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds extra - command options if any.' - type: object - readOnly: - description: 'readOnly is Optional: defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'secretRef is Optional: secretRef is reference - to the secret object containing sensitive information - to pass to the plugin scripts. This may be empty if no - secret object is specified. If the secret object contains - more than one secret, all secrets are passed to the plugin - scripts.' 
- properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: datasetName is Name of the dataset stored as - metadata -> name on the dataset for Flocker should be - considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. This - is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'fsType is filesystem type of the volume that - you want to mount. Tip: Ensure that the filesystem type - is supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'pdName is unique name of the PD resource in - GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'gitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision - a container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: directory is the target directory name. Must - not contain or start with '..'. If '.' is supplied, the - volume directory will be the git repository. Otherwise, - if specified, the volume will contain the git repository - in the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the specified - revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'glusterfs represents a Glusterfs mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'endpoints is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'path is the Glusterfs volume path. 
More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'readOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to - false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'hostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'path of the directory on the host. If the - path is a symlink, it will follow the link to the real - path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'iscsi represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to - the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI - Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support iSCSI - Session CHAP authentication - type: boolean - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - initiatorName: - description: initiatorName is the custom iSCSI Initiator - Name. If initiatorName is specified with iscsiInterface - simultaneously, new iSCSI interface : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iscsiInterface is the interface Name that uses - an iSCSI transport. Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: portals is the iSCSI Target Portal List. The - portal is either an IP or ip_addr:port if the port is - other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI target - and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: targetPortal is iSCSI Target Portal. The Portal - is either an IP or ip_addr:port if the port is other than - default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'name of the volume. Must be a DNS_LABEL and unique - within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'nfs represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'readOnly here will force the NFS export to - be mounted with read-only permissions. Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource represents a - reference to a PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'claimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: readOnly Will force the ReadOnly setting in - VolumeMounts. Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. 
- type: string - pdID: - description: pdID is the ID that identifies Photon Controller - persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources secrets, - configmaps, and downward API - properties: - defaultMode: - description: defaultMode are the mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Directories within the path are - not affected by this setting. This might be in conflict - with other options that affect the file mode, like fsGroup, - and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume projections - items: - description: Projection that may be projected along with - other supported volume types - properties: - configMap: - description: configMap information about the configMap - data to project - properties: - items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. 
- If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the ConfigMap, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the downwardAPI - data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: 'Optional: mode bits used to - set permissions on this file, must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' - path. Must be utf-8 encoded. The first - item of the relative path must not start - with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu - and requests.memory) are currently supported.' 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to - select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - description: secret information about the secret data - to project - properties: - items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the Secret, the volume setup - will error unless it is marked optional. Paths - must be relative and may not contain the '..' - path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of - the file to map the key to. 
May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: optional field specify whether the - Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information about - the serviceAccountToken data to project - properties: - audience: - description: audience is the intended audience - of the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, the - kubelet volume plugin will proactively rotate - the service account token. The kubelet will - start trying to rotate the token if the token - is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. - format: int64 - type: integer - path: - description: path is the path relative to the - mount point of the file to project the token - into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime - properties: - group: - description: group to map volume access to Default is no - group - type: string - readOnly: - description: readOnly here will force the Quobyte volume - to be mounted with read-only permissions. Defaults to - false. - type: boolean - registry: - description: registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: user to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'rbd represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - image: - description: 'image is the rados image name. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'keyring is the path to key ring for RBDUser. 
- Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - items: - type: string - type: array - pool: - description: 'pool is the rados pool name. Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'secretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is - nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: 'user is the rados user name. Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: gateway is the host address of the ScaleIO - API Gateway. - type: string - protectionDomain: - description: protectionDomain is the name of the ScaleIO - Protection Domain for the configured storage. - type: string - readOnly: - description: readOnly Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: secretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL communication - with Gateway, default false - type: boolean - storageMode: - description: storageMode indicates whether the storage for - a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage Pool associated - with the protection domain. - type: string - system: - description: system is the name of the storage system as - configured in ScaleIO. - type: string - volumeName: - description: volumeName is the name of a volume already - created in the ScaleIO system that is associated with - this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'defaultMode is Optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: items If unspecified, each key-value pair in - the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the Secret, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether the Secret or - its keys must be defined - type: boolean - secretName: - description: 'secretName is the name of the secret in the - pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. 
Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: secretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: volumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within - a namespace. - type: string - volumeNamespace: - description: volumeNamespace specifies the scope of the - volume within StorageOS. If no namespace is specified - then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS - for tighter integration. Set VolumeName to any name to - override the default behaviour. Set to "default" if you - are not using namespaces within StorageOS. Namespaces - that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: fsType is filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy Based - Management (SPBM) profile ID associated with the StoragePolicyName. 
- type: string - storagePolicyName: - description: storagePolicyName is the storage Policy Based - Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies vSphere - volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - minItems: 1 - type: array - x-kubernetes-preserve-unknown-fields: true - required: - - backupJobName - - target - - targetVolumeMounts - - targetVolumes - type: object - status: - description: RestoreJobStatus defines the observed state of RestoreJob - properties: - completionTimestamp: - description: Date/time when the backup finished being processed. - format: date-time - type: string - expiration: - description: The date and time when the Backup is eligible for garbage - collection. 'null' means the Backup is NOT be cleaned except delete - manual. - format: date-time - type: string - failureReason: - description: Job failed reason. - type: string - phase: - description: RestoreJobPhase The current phase. Valid values are New, - InProgressPhy, InProgressLogic, Completed, Failed. - enum: - - New - - InProgressPhy - - InProgressLogic - - Completed - - Failed - type: string - startTimestamp: - description: Date/time when the backup started being processed. 
- format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/config/crd/bases/dataprotection.kubeblocks.io_restores.yaml b/config/crd/bases/dataprotection.kubeblocks.io_restores.yaml new file mode 100644 index 00000000000..a36a9f042d4 --- /dev/null +++ b/config/crd/bases/dataprotection.kubeblocks.io_restores.yaml @@ -0,0 +1,2522 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: restores.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + - all + kind: Restore + listKind: RestoreList + plural: restores + singular: restore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backup.name + name: BACKUP + type: string + - description: Point in time for restoring + jsonPath: .spec.restoreTime + name: RESTORE-TIME + type: string + - description: Restore Status. + jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .status.duration + name: DURATION + type: string + - jsonPath: .metadata.creationTimestamp + name: CREATE-TIME + type: string + - jsonPath: .status.completionTimestamp + name: COMPLETION-TIME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Restore is the Schema for the restores API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSpec defines the desired state of Restore + properties: + backup: + description: 'backup name, the following behavior based on the backup + type: 1. Full: will be restored the full backup directly. 2. Incremental: + will be restored sequentially from the most recent full backup of + this incremental backup. 3. Differential: will be restored sequentially + from the parent backup of the differential backup. 4. Continuous: + will find the most recent full backup at this time point and the + input continuous backup to restore.' + properties: + name: + description: backup name + type: string + namespace: + description: backup namespace + type: string + required: + - name + - namespace + type: object + x-kubernetes-validations: + - message: forbidden to update spec.backupName + rule: self == oldSelf + containerResources: + description: specified the required resources of restore job's container. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + env: + description: 'list of environment variables to set in the container + for restore and will be merged with the env of Backup and ActionSet. + the priority of merging is as follows: Restore env > Backup env + > ActionSet env.' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + prepareDataConfig: + description: configuration for the action of "prepareData" phase, + including the persistent volume claims that need to be restored + and scheduling strategy of temporary recovery pod. + properties: + dataSourceRef: + description: dataSourceRef describes the configuration when using + `persistentVolumeClaim.spec.dataSourceRef` method for restoring. + it describes the source volume of the backup targetVolumes and + how to mount path in the restoring container. + properties: + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. 
+ type: string + volumeSource: + description: volumeSource describes the volume will be restored + from the specified volume of the backup targetVolumes. required + if the backup uses volume snapshot. + type: string + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + - message: forbidden to update spec.prepareDataConfig.dataSourceRef + rule: self == oldSelf + schedulingSpec: + description: scheduling spec for restoring pod. + properties: + affinity: + description: affinity is a group of affinity scheduling rules. + refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to an update), the system may or may not try + to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. 
for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". 
The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. 
The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to a pod label update), the system may or may + not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. 
avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to + the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. 
+ This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. 
A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + anti-affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. 
The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: nodeName is a request to schedule this pod onto + a specific node. If it is non-empty, the scheduler simply + schedules this pod onto that node, assuming that it fits + resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: If specified, the pod will be dispatched by specified + scheduler. If not specified, the pod will be dispatched + by default scheduler. + type: string + tolerations: + description: the restoring pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. 
+ type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: topologySpreadConstraints describes how a group + of pods ought to spread across topology domains. Scheduler + will schedule pods in a way which abides by the constraints. + refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching + pods. Pods that match this label selector are counted + to determine the number of pods in their corresponding + topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are ANDed with + labelSelector to select the group of existing pods + over which spreading will be calculated for the incoming + pod. The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be set when + LabelSelector isn't set. Keys that don't exist in + the incoming pod labels will be ignored. A null or + empty list means only match against labelSelector. + \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or zero + if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to + 1, and pods with the same labelSelector spread as + 2/2/1: In this case, the global minimum is 1. | zone1 + | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 + to become 2/2/2; scheduling it onto zone1(zone2) would + make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto + any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default value + is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number + of eligible domains. When the number of eligible domains + with matching topology keys is less than minDomains, + Pod Topology Spread treats \"global minimum\" as 0, + and then the calculation of Skew is performed. And + when the number of eligible domains with matching + topology keys equals or greater than minDomains, this + value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to + those domains. If value is nil, the constraint behaves + as if MinDomains is equal to 1. Valid values are integers + greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. 
\n For example, in a 3-zone + cluster, MaxSkew is set to 2, MinDomains is set to + 5 and pods with the same labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so \"global minimum\" is treated as 0. In this situation, + new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod + is scheduled to any of the three zones, it will violate + MaxSkew. \n This is a beta field and requires the + MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will + treat Pod's nodeAffinity/nodeSelector when calculating + pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included + in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + \n If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will + treat node taints when calculating pod topology spread + skew. Options are: - Honor: nodes without taints, + along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints + are ignored. All nodes are included. \n If this value + is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. 
+ We consider each as a "bucket", and try + to put balanced number of pods into each bucket. We + define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose + nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal + with a pod if it doesn''t satisfy the spread constraint. + - DoNotSchedule (default) tells the scheduler not + to schedule it. - ScheduleAnyway tells the scheduler + to schedule the pod in any location, but giving higher + precedence to topologies that would help reduce the + skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node + assignment for that pod would violate "MaxSkew" on + some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P + | P | P | If WhenUnsatisfiable is set to DoNotSchedule, + incoming pod can only be scheduled to zone2(zone3) + to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) + satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make + it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.schedulingSpec + rule: self == oldSelf + volumeClaimManagementPolicy: + default: Parallel + description: 'VolumeClaimManagementPolicy defines recovery strategy + for persistent volume claim. supported policies are as follows: + 1. 
Parallel: parallel recovery of persistent volume claim. 2. + Serial: restore the persistent volume claim in sequence, and + wait until the previous persistent volume claim is restored + before restoring a new one.' + enum: + - Parallel + - Serial + type: string + volumeClaims: + description: volumeClaims defines the persistent Volume claims + that need to be restored and mount them together into the restore + job. these persistent Volume claims will be created if not exist. + items: + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. + type: string + volumeClaimSpec: + description: volumeClaimSpec defines the desired characteristics + of a persistent volume claim. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on + the contents of the specified data source. 
When the + AnyVolumeDataSource feature gate is enabled, dataSource + contents will be copied to dataSourceRef, and dataSourceRef + contents will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from + which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. There + are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. 
* While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is + specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace to + allow that namespace's owner to accept the reference. + See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the + status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. 
\n This is an alpha field and + requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can + only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the + Pod where this field is used. It makes that + resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + volumeSource: + description: volumeSource describes the volume will be restored + from the specified volume of the backup targetVolumes. + required if the backup uses volume snapshot. 
+ type: string + required: + - metadata + - volumeClaimSpec + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + type: array + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.volumeClaims + rule: self == oldSelf + volumeClaimsTemplate: + description: volumeClaimsTemplate defines a template to build + persistent Volume claims that need to be restored. these claims + will be created in an orderly manner based on the number of + replicas or reused if already exist. + properties: + replicas: + description: the replicas of persistent volume claim which + need to be created and restored. the format of created claim + name is "-". + format: int32 + minimum: 1 + type: integer + startingIndex: + description: the starting index for the created persistent + volume claim by according to template. minimum is 0. + format: int32 + minimum: 0 + type: integer + templates: + description: templates is a list of volume claims. + items: + properties: + metadata: + description: 'Standard object''s metadata. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. + type: string + volumeClaimSpec: + description: volumeClaimSpec defines the desired characteristics + of a persistent volume claim. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + volumeSource: + description: volumeSource describes the volume will + be restored from the specified volume of the backup + targetVolumes. required if the backup uses volume + snapshot. + type: string + required: + - metadata + - volumeClaimSpec + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + type: array + required: + - replicas + - templates + type: object + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.volumeClaimsTemplate + rule: self == oldSelf + required: + - volumeClaimManagementPolicy + type: object + readyConfig: + description: configuration for the action of "postReady" phase. 
+ properties: + connectCredential: + description: credential template used for creating a connection + credential + properties: + hostKey: + default: host + description: hostKey the map key of the host in the connection + credential secret + type: string + passwordKey: + default: password + description: passwordKey the map key of the password in the + connection credential secret + type: string + portKey: + default: port + description: portKey the map key of the port in the connection + credential secret + type: string + secretName: + description: the secret name + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + usernameKey: + default: username + description: usernameKey the map key of the user in the connection + credential secret + type: string + required: + - passwordKey + - secretName + - usernameKey + type: object + execAction: + description: configuration for exec action. + properties: + target: + description: execActionTarget defines the pods that need to + be executed for the exec action. will execute on all pods + that meet the conditions. + properties: + podSelector: + description: kubectl exec in all selected pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - podSelector + type: object + type: object + jobAction: + description: configuration for job action. + properties: + target: + description: jobActionTarget defines the pod that need to + be executed for the job action. will select a pod that meets + the conditions to execute. + properties: + podSelector: + description: select one of the pods which selected by + labels to build the job spec, such as mount required + volumes and inject built-in env of the selected pod. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + volumeMounts: + description: volumeMounts defines which volumes of the + selected pod need to be mounted on the restoring pod. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + required: + - podSelector + type: object + required: + - target + type: object + readinessProbe: + description: periodic probe of the service readiness. controller + will perform postReadyHooks of BackupScript.spec.restore after + the service readiness when readinessProbe is configured. + properties: + exec: + description: exec specifies the action to take. + properties: + command: + description: refer to container command. + items: + type: string + type: array + image: + description: refer to container image. + type: string + required: + - command + - image + type: object + initialDelaySeconds: + description: number of seconds after the container has started + before probe is initiated. + minimum: 0 + type: integer + periodSeconds: + default: 5 + description: how often (in seconds) to perform the probe. + defaults to 5 second, minimum value is 1. + minimum: 1 + type: integer + timeoutSeconds: + default: 30 + description: number of seconds after which the probe times + out. defaults to 30 second, minimum value is 1. + minimum: 1 + type: integer + required: + - exec + type: object + type: object + x-kubernetes-validations: + - message: at least one exists for jobAction and execAction. + rule: has(self.jobAction) || has(self.execAction) + resources: + description: restore the specified resources of kubernetes. + properties: + included: + description: will restore the specified resources + items: + properties: + groupResource: + type: string + labelSelector: + description: select the specified resource for recovery + by label. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - groupResource + type: object + type: array + type: object + x-kubernetes-validations: + - message: forbidden to update spec.resources + rule: self == oldSelf + restoreTime: + description: restore according to a specified point in time. + pattern: ^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$ + type: string + x-kubernetes-validations: + - message: forbidden to update spec.restoreTime + rule: self == oldSelf + serviceAccountName: + description: service account name which needs for recovery pod. + type: string + required: + - backup + type: object + status: + description: RestoreStatus defines the observed state of Restore + properties: + actions: + description: recorded all restore actions performed. + properties: + postReady: + description: record the actions for postReady phase. + items: + properties: + backupName: + description: which backup's restore action belongs to. 
+ type: string + endTime: + description: endTime is the completion time for the restore + job. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the object condition. + type: string + name: + description: name describes the name of the recovery action + based on the current backup. + type: string + objectKey: + description: the execution object of the restore action. + type: string + startTime: + description: startTime is the start time for the restore + job. + format: date-time + type: string + status: + description: the status of this action. + enum: + - Processing + - Completed + - Failed + type: string + required: + - backupName + - name + - objectKey + type: object + type: array + prepareData: + description: record the actions for prepareData phase. + items: + properties: + backupName: + description: which backup's restore action belongs to. + type: string + endTime: + description: endTime is the completion time for the restore + job. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the object condition. + type: string + name: + description: name describes the name of the recovery action + based on the current backup. + type: string + objectKey: + description: the execution object of the restore action. + type: string + startTime: + description: startTime is the start time for the restore + job. + format: date-time + type: string + status: + description: the status of this action. + enum: + - Processing + - Completed + - Failed + type: string + required: + - backupName + - name + - objectKey + type: object + type: array + type: object + completionTimestamp: + description: Date/time when the restore finished being processed. + format: date-time + type: string + conditions: + description: describe current state of restore API Resource, like + warning. 
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + duration: + description: The duration time of restore execution. When converted + to a string, the form is "1h2m0.5s". + type: string + phase: + description: RestorePhase The current phase. Valid values are Running, + Completed, Failed, Deleting. + enum: + - Running + - Completed + - Failed + - Deleting + type: string + startTimestamp: + description: Date/time when the restore started being processed. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c03aa316c33..d6b2f4c3947 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,16 +8,15 @@ resources: - bases/apps.kubeblocks.io_clusterversions.yaml - bases/apps.kubeblocks.io_configconstraints.yaml - bases/apps.kubeblocks.io_opsrequests.yaml -- bases/dataprotection.kubeblocks.io_backuptools.yaml - bases/dataprotection.kubeblocks.io_backuppolicies.yaml - bases/dataprotection.kubeblocks.io_backups.yaml -- bases/dataprotection.kubeblocks.io_restorejobs.yaml - bases/extensions.kubeblocks.io_addons.yaml - bases/apps.kubeblocks.io_componentresourceconstraints.yaml - bases/apps.kubeblocks.io_componentclassdefinitions.yaml - bases/workloads.kubeblocks.io_replicatedstatemachines.yaml - bases/storage.kubeblocks.io_storageproviders.yaml - bases/dataprotection.kubeblocks.io_backuprepos.yaml +- bases/dataprotection.kubeblocks.io_restores.yaml - bases/apps.kubeblocks.io_configurations.yaml - bases/apps.kubeblocks.io_servicedescriptors.yaml #+kubebuilder:scaffold:crdkustomizeresource @@ -31,7 +30,6 @@ patchesStrategicMerge: #- patches/webhook_in_backuptools.yaml #- patches/webhook_in_backuppolicies.yaml #- patches/webhook_in_backups.yaml -#- patches/webhook_in_restorejobs.yaml #- patches/webhook_in_backuppolicytemplates.yaml #- patches/webhook_in_opsrequests.yaml #- patches/webhook_in_reconfigurerequests.yaml @@ -44,6 +42,7 @@ patchesStrategicMerge: #- patches/webhook_in_replicatedstatemachines.yaml #- patches/webhook_in_storageproviders.yaml #- patches/webhook_in_backuprepos.yaml +#- patches/webhook_in_restores.yaml #- patches/webhook_in_configurations.yaml #- patches/webhook_in_servicedescriptors.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch @@ -56,7 +55,6 @@ patchesStrategicMerge: #- patches/cainjection_in_backuptools.yaml #- 
patches/cainjection_in_backuppolicies.yaml #- patches/cainjection_in_backups.yaml -#- patches/cainjection_in_restorejobs.yaml #- patches/cainjection_in_backuppolicytemplates.yaml #- patches/cainjection_in_opsrequests.yaml #- patches/cainjection_in_reconfigurerequests.yaml @@ -69,6 +67,7 @@ patchesStrategicMerge: #- patches/cainjection_in_replicatedstatemachines.yaml #- patches/cainjection_in_storageproviders.yaml #- patches/cainjection_in_backuprepos.yaml +#- patches/cainjection_in_restores.yaml #- patches/cainjection_in_configurations.yaml #- patches/cainjection_in_servicedescriptors.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch diff --git a/config/crd/patches/cainjection_in_dataprotection_restorejobs.yaml b/config/crd/patches/cainjection_in_restores.yaml similarity index 83% rename from config/crd/patches/cainjection_in_dataprotection_restorejobs.yaml rename to config/crd/patches/cainjection_in_restores.yaml index f8863965617..dc4c069a797 100644 --- a/config/crd/patches/cainjection_in_dataprotection_restorejobs.yaml +++ b/config/crd/patches/cainjection_in_restores.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: restorejobs.dataprotection.kubeblocks.io + name: restores.dataprotection.kubeblocks.io diff --git a/config/crd/patches/webhook_in_dataprotection_restorejobs.yaml b/config/crd/patches/webhook_in_restores.yaml similarity index 88% rename from config/crd/patches/webhook_in_dataprotection_restorejobs.yaml rename to config/crd/patches/webhook_in_restores.yaml index c138579306e..6816d399a6f 100644 --- a/config/crd/patches/webhook_in_dataprotection_restorejobs.yaml +++ b/config/crd/patches/webhook_in_restores.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: restorejobs.dataprotection.kubeblocks.io + name: restores.dataprotection.kubeblocks.io spec: conversion: strategy: Webhook diff 
--git a/config/rbac/dataprotection_restorejob_editor_role.yaml b/config/rbac/dataprotection_estore_editor_role.yaml similarity index 70% rename from config/rbac/dataprotection_restorejob_editor_role.yaml rename to config/rbac/dataprotection_estore_editor_role.yaml index 8b7fad4d512..e232ebedc31 100644 --- a/config/rbac/dataprotection_restorejob_editor_role.yaml +++ b/config/rbac/dataprotection_estore_editor_role.yaml @@ -1,13 +1,13 @@ -# permissions for end users to edit restorejobs. +# permissions for end users to edit restores. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: restorejob-editor-role + name: restore-editor-role rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - create - delete @@ -19,6 +19,6 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get diff --git a/config/rbac/dataprotection_restorejob_viewer_role.yaml b/config/rbac/dataprotection_restore_viewer_role.yaml similarity index 67% rename from config/rbac/dataprotection_restorejob_viewer_role.yaml rename to config/rbac/dataprotection_restore_viewer_role.yaml index e11a21eda8c..ce5953eb959 100644 --- a/config/rbac/dataprotection_restorejob_viewer_role.yaml +++ b/config/rbac/dataprotection_restore_viewer_role.yaml @@ -1,13 +1,13 @@ -# permissions for end users to view restorejobs. +# permissions for end users to view restores. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: restorejob-viewer-role + name: restore-viewer-role rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - get - list @@ -15,6 +15,6 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 716b0ea9ce9..57454b4073c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -566,6 +566,32 @@ rules: - services/status verbs: - get +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets/finalizers + verbs: + - update +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets/status + verbs: + - get + - patch + - update - apiGroups: - dataprotection.kubeblocks.io resources: @@ -649,7 +675,7 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools + - backupschedules verbs: - create - delete @@ -661,13 +687,13 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools/finalizers + - backupschedules/finalizers verbs: - update - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools/status + - backupschedules/status verbs: - get - patch @@ -675,7 +701,7 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - create - delete @@ -687,13 +713,13 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/finalizers + - restores/finalizers verbs: - update - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get - patch diff --git a/config/samples/dataprotection_v1alpha1_restore.yaml b/config/samples/dataprotection_v1alpha1_restore.yaml new file mode 100644 
index 00000000000..db589a76597 --- /dev/null +++ b/config/samples/dataprotection_v1alpha1_restore.yaml @@ -0,0 +1,6 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Restore +metadata: + name: restore-sample +spec: + # TODO(user): Add fields here diff --git a/controllers/apps/class_controller_test.go b/controllers/apps/class_controller_test.go index 25a427d2f05..194480afbfd 100644 --- a/controllers/apps/class_controller_test.go +++ b/controllers/apps/class_controller_test.go @@ -22,6 +22,7 @@ package apps import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index ca41cbc5883..9e0cec3815e 100644 --- a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" @@ -179,12 +179,12 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct &ValidateEnableLogsTransformer{}, // create cluster connection credential secret object &ClusterCredentialTransformer{}, - // handle restore + // handle restore before ComponentTransformer &RestoreTransformer{Client: r.Client}, // create all components objects &ComponentTransformer{Client: r.Client}, // transform backupPolicy tpl to backuppolicy.dataprotection.kubeblocks.io - &BackupPolicyTPLTransformer{}, + &BackupPolicyTplTransformer{}, // handle rbac for pod &RBACTransformer{}, // add our 
finalizer to all objects @@ -226,8 +226,10 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.ConfigMap{}). Owns(&corev1.PersistentVolumeClaim{}). Owns(&policyv1.PodDisruptionBudget{}). - Owns(&dataprotectionv1alpha1.BackupPolicy{}). - Owns(&dataprotectionv1alpha1.Backup{}). + Owns(&dpv1alpha1.BackupPolicy{}). + Owns(&dpv1alpha1.BackupSchedule{}). + Owns(&dpv1alpha1.Backup{}). + Owns(&dpv1alpha1.Restore{}). Owns(&batchv1.Job{}). Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(r.filterClusterResources)) diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 56620849518..d694b534c78 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -29,13 +29,13 @@ import ( "strings" "time" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/sethvargo/go-password/password" "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" @@ -51,14 +51,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps/components" "github.com/apecloud/kubeblocks/internal/common" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" 
"github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" viper "github.com/apecloud/kubeblocks/internal/viperx" lorry "github.com/apecloud/kubeblocks/lorry/client" @@ -66,6 +68,10 @@ import ( const ( backupPolicyTPLName = "test-backup-policy-template-mysql" + backupMethodName = "test-backup-method" + vsBackupMethodName = "test-vs-backup-method" + actionSetName = "test-action-set" + vsActionSetName = "test-vs-action-set" ) var ( @@ -125,7 +131,7 @@ var _ = Describe("Cluster Controller", func() { consensusCompDefName = "consensus" replicationCompName = "replication" replicationCompDefName = "replication" - backupToolName = "test-backup-tool" + actionSetName = "test-actionset" ) var ( @@ -174,12 +180,12 @@ var _ = Describe("Cluster Controller", func() { // namespaced testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) - testapps.ClearResources(&testCtx, generics.BackupSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.BackupPolicySignature, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupPolicySignature, true, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.VolumeSnapshotSignature, true, inNS) // non-namespaced testapps.ClearResources(&testCtx, generics.BackupPolicyTemplateSignature, ml) - testapps.ClearResources(&testCtx, generics.BackupToolSignature, ml) + testapps.ClearResources(&testCtx, generics.ActionSetSignature, ml) testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) resetTestContext() } @@ -378,9 +384,10 @@ 
var _ = Describe("Cluster Controller", func() { By("Mocking a retained backup") backupPolicyName := "test-backup-policy" backupName := "test-backup" - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). + backupMethod := "test-backup-method" + backup := testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName). SetBackupPolicyName(backupPolicyName). - SetBackupType(dataprotectionv1alpha1.BackupTypeDataFile). + SetBackupMethod(backupMethod). SetLabels(map[string]string{constant.AppInstanceLabelKey: clusterKey.Name, constant.BackupProtectionLabelKey: constant.BackupRetain}). WithRandomName(). Create(&testCtx).GetObject() @@ -395,7 +402,7 @@ var _ = Describe("Cluster Controller", func() { Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, false)).Should(Succeed()) By("Checking backup should exist") - Eventually(testapps.CheckObjExists(&testCtx, backupKey, &dataprotectionv1alpha1.Backup{}, true)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, backupKey, &dpv1alpha1.Backup{}, true)).Should(Succeed()) } testDoNotTerminate := func(compName, compDefName string) { @@ -485,19 +492,24 @@ var _ = Describe("Cluster Controller", func() { return fmt.Sprintf("%s-%s-%s-%d", vctName, clusterKey.Name, compName, i) } - createPVC := func(clusterName, pvcName, compName, storageClassName string) { - testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, compName, "data"). + createPVC := func(clusterName, pvcName, compName, storageSize, storageClassName string) { + if storageSize == "" { + storageSize = "1Gi" + } + clusterBytes, _ := json.Marshal(clusterObj) + testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, + compName, testapps.DataVolumeName). AddLabelsInMap(map[string]string{ constant.AppInstanceLabelKey: clusterName, constant.KBAppComponentLabelKey: compName, constant.AppManagedByLabelKey: constant.AppName, - }). - SetStorage("1Gi"). 
+ }).AddAnnotations(constant.LastAppliedClusterAnnotationKey, string(clusterBytes)). + SetStorage(storageSize). SetStorageClass(storageClassName). CheckedCreate(&testCtx) } - mockComponentPVCsBound := func(comp *appsv1alpha1.ClusterComponentSpec, replicas int, create bool, storageClassName string) { + mockComponentPVCsAndBound := func(comp *appsv1alpha1.ClusterComponentSpec, replicas int, create bool, storageClassName string) { for i := 0; i < replicas; i++ { for _, vct := range comp.VolumeClaimTemplates { pvcKey := types.NamespacedName{ @@ -505,7 +517,7 @@ var _ = Describe("Cluster Controller", func() { Name: getPVCName(vct.Name, comp.Name, i), } if create { - createPVC(clusterKey.Name, pvcKey.Name, comp.Name, storageClassName) + createPVC(clusterKey.Name, pvcKey.Name, comp.Name, vct.Spec.Resources.Requests.Storage().String(), storageClassName) } Eventually(testapps.CheckObjExists(&testCtx, pvcKey, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) @@ -557,7 +569,7 @@ var _ = Describe("Cluster Controller", func() { horizontalScaleComp := func(updatedReplicas int, comp *appsv1alpha1.ClusterComponentSpec, storageClassName string, policy *appsv1alpha1.HorizontalScalePolicy) { By("Mocking component PVCs to bound") - mockComponentPVCsBound(comp, int(comp.Replicas), true, storageClassName) + mockComponentPVCsAndBound(comp, int(comp.Replicas), true, storageClassName) By("Checking rsm replicas right") rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) @@ -595,22 +607,24 @@ var _ = Describe("Cluster Controller", func() { return } + ml := client.MatchingLabels{ + constant.AppInstanceLabelKey: clusterKey.Name, + constant.KBAppComponentLabelKey: comp.Name, + constant.KBManagedByKey: "cluster", + } if policy != nil { By(fmt.Sprintf("Checking backup of component %s created", comp.Name)) Eventually(testapps.List(&testCtx, generics.BackupSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - 
constant.KBAppComponentLabelKey: comp.Name, - }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(1)) + ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(1)) backupKey := types.NamespacedName{Name: fmt.Sprintf("%s-%s-scaling", clusterKey.Name, comp.Name), Namespace: testCtx.DefaultNamespace} By("Mocking backup status to completed") - Expect(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(backup *dataprotectionv1alpha1.Backup) { - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted + Expect(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(backup *dpv1alpha1.Backup) { + backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted backup.Status.PersistentVolumeClaimName = "backup-data" - backup.Status.BackupToolName = backupToolName + testdp.MockBackupStatusMethod(backup, testapps.DataVolumeName) })()).Should(Succeed()) if testk8s.IsMockVolumeSnapshotEnabled(&testCtx, storageClassName) { @@ -621,7 +635,7 @@ var _ = Describe("Cluster Controller", func() { Name: backupKey.Name, Namespace: backupKey.Namespace, Labels: map[string]string{ - constant.DataProtectionLabelBackupNameKey: backupKey.Name, + dptypes.DataProtectionLabelBackupNameKey: backupKey.Name, }}, Spec: snapshotv1.VolumeSnapshotSpec{ Source: snapshotv1.VolumeSnapshotSource{ @@ -639,60 +653,17 @@ var _ = Describe("Cluster Controller", func() { } } - By("Checking pvc created") - Eventually(testapps.List(&testCtx, generics.PersistentVolumeClaimSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, - }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(updatedReplicas * len(comp.VolumeClaimTemplates))) + By("Mock PVCs and set status to bound") + mockComponentPVCsAndBound(comp, updatedReplicas, true, storageClassName) - volumeSnapshotEnabled := testk8s.IsMockVolumeSnapshotEnabled(&testCtx, testk8s.DefaultStorageClassName) if policy != nil { - if !volumeSnapshotEnabled && 
len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { - By("Checking restore job created") - Eventually(testapps.List(&testCtx, generics.JobSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, - constant.KBManagedByKey: "cluster", - }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(updatedReplicas - int(comp.Replicas))) - - By("Mocking job status to succeeded") - ml := client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, - constant.KBManagedByKey: "cluster", - } - jobList := batchv1.JobList{} - Expect(testCtx.Cli.List(testCtx.Ctx, &jobList, ml)).Should(Succeed()) - for _, job := range jobList.Items { - key := client.ObjectKeyFromObject(&job) - Expect(testapps.GetAndChangeObjStatus(&testCtx, key, func(job *batchv1.Job) { - job.Status.Succeeded = 1 - })()).Should(Succeed()) - } - } + checkRestoreAndSetCompleted(clusterKey, comp.Name, updatedReplicas-int(comp.Replicas)) } - By("Mock PVCs status to bound") - mockComponentPVCsBound(comp, updatedReplicas, false, "") - if policy != nil { - By("Checking backup cleanup") - Eventually(testapps.List(&testCtx, generics.BackupSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, - }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) - - if !volumeSnapshotEnabled && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { - By("Checking restore job cleanup") - Eventually(testapps.List(&testCtx, generics.JobSignature, - client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, - }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) - } + By("Checking Backup and Restore cleanup") + Eventually(testapps.List(&testCtx, generics.BackupSignature, ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) + 
Eventually(testapps.List(&testCtx, generics.RestoreSignature, ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) } checkUpdatedStsReplicas() @@ -800,37 +771,45 @@ var _ = Describe("Cluster Controller", func() { } By("Checking backup policy created from backup policy template") - policyName := DeriveBackupPolicyName(clusterKey.Name, compDef.Name, "") + policyName := generateBackupPolicyName(clusterKey.Name, compDef.Name, "") clusterDef.Spec.ComponentDefs[i].HorizontalScalePolicy = &appsv1alpha1.HorizontalScalePolicy{ Type: policyType, BackupPolicyTemplateName: backupPolicyTPLName, } Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKey{Name: policyName, Namespace: clusterKey.Namespace}, - &dataprotectionv1alpha1.BackupPolicy{}, true)).Should(Succeed()) + &dpv1alpha1.BackupPolicy{}, true)).Should(Succeed()) if policyType == appsv1alpha1.HScaleDataClonePolicyCloneVolume { - By("creating backup tool if backup policy is backup") - backupTool := &dataprotectionv1alpha1.BackupTool{ + By("creating actionSet if backup policy is backup") + actionSet := &dpv1alpha1.ActionSet{ ObjectMeta: metav1.ObjectMeta{ - Name: backupToolName, + Name: actionSetName, Namespace: clusterKey.Namespace, Labels: map[string]string{ constant.ClusterDefLabelKey: clusterDef.Name, }, }, - Spec: dataprotectionv1alpha1.BackupToolSpec{ - BackupCommands: []string{""}, - Image: "xtrabackup", + Spec: dpv1alpha1.ActionSetSpec{ Env: []corev1.EnvVar{ { Name: "test-name", Value: "test-value", }, }, - Physical: &dataprotectionv1alpha1.PhysicalConfig{ - BackupToolRestoreCommand: dataprotectionv1alpha1.BackupToolRestoreCommand{ - RestoreCommands: []string{ + BackupType: dpv1alpha1.BackupTypeFull, + Backup: &dpv1alpha1.BackupActionSpec{ + BackupData: &dpv1alpha1.BackupDataActionSpec{ + JobActionSpec: dpv1alpha1.JobActionSpec{ + Image: "xtrabackup", + Command: []string{""}, + }, + }, + }, + Restore: &dpv1alpha1.RestoreActionSpec{ + PrepareData: &dpv1alpha1.JobActionSpec{ + Image: 
"xtrabackup", + Command: []string{ "sh", "-c", "/backup_scripts.sh", @@ -839,7 +818,7 @@ var _ = Describe("Cluster Controller", func() { }, }, } - testapps.CheckedCreateK8sResource(&testCtx, backupTool) + testapps.CheckedCreateK8sResource(&testCtx, actionSet) } } })()).ShouldNot(HaveOccurred()) @@ -858,8 +837,8 @@ var _ = Describe("Cluster Controller", func() { setHorizontalScalePolicy(policyType, componentDefsWithHScalePolicy...) By("Mocking all components' PVCs to bound") - for _, comp := range clusterObj.Spec.ComponentSpecs { - mockComponentPVCsBound(&comp, int(comp.Replicas), true, storageClassName) + for _, comp := range cluster.Spec.ComponentSpecs { + mockComponentPVCsAndBound(&comp, int(comp.Replicas), true, storageClassName) } hscalePolicy := func(comp appsv1alpha1.ClusterComponentSpec) *appsv1alpha1.HorizontalScalePolicy { @@ -873,16 +852,16 @@ var _ = Describe("Cluster Controller", func() { By("Get the latest cluster def") Expect(k8sClient.Get(testCtx.Ctx, client.ObjectKeyFromObject(clusterDefObj), clusterDefObj)).Should(Succeed()) - for i, comp := range clusterObj.Spec.ComponentSpecs { + for i, comp := range cluster.Spec.ComponentSpecs { lorry.SetMockClient(&mockLorryClient{replicas: updatedReplicas, clusterKey: clusterKey, compName: comp.Name}, nil) By(fmt.Sprintf("H-scale component %s with policy %s", comp.Name, hscalePolicy(comp))) - horizontalScaleComp(updatedReplicas, &clusterObj.Spec.ComponentSpecs[i], storageClassName, hscalePolicy(comp)) + horizontalScaleComp(updatedReplicas, &cluster.Spec.ComponentSpecs[i], storageClassName, hscalePolicy(comp)) } By("Checking cluster status and the number of replicas changed") Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)). 
- Should(BeEquivalentTo(initialGeneration + len(clusterObj.Spec.ComponentSpecs))) + Should(BeEquivalentTo(initialGeneration + len(cluster.Spec.ComponentSpecs))) } testHorizontalScale := func(compName, compDefName string, initialReplicas, updatedReplicas int32, @@ -1619,7 +1598,7 @@ var _ = Describe("Cluster Controller", func() { By("Create and Mock PVCs status to bound") for _, comp := range clusterObj.Spec.ComponentSpecs { - mockComponentPVCsBound(&comp, int(comp.Replicas), true, testk8s.DefaultStorageClassName) + mockComponentPVCsAndBound(&comp, int(comp.Replicas), true, testk8s.DefaultStorageClassName) } By(fmt.Sprintf("Changing replicas to %d", updatedReplicas)) @@ -1635,14 +1614,14 @@ var _ = Describe("Cluster Controller", func() { ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(1)) By("Mocking backup status to failed") - backupList := dataprotectionv1alpha1.BackupList{} + backupList := dpv1alpha1.BackupList{} Expect(testCtx.Cli.List(testCtx.Ctx, &backupList, ml)).Should(Succeed()) backupKey := types.NamespacedName{ Namespace: backupList.Items[0].Namespace, Name: backupList.Items[0].Name, } - Expect(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(backup *dataprotectionv1alpha1.Backup) { - backup.Status.Phase = dataprotectionv1alpha1.BackupFailed + Expect(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(backup *dpv1alpha1.Backup) { + backup.Status.Phase = dpv1alpha1.BackupPhaseFailed })()).Should(Succeed()) By("Checking cluster status failed with backup error") @@ -1652,15 +1631,10 @@ var _ = Describe("Cluster Controller", func() { var err error for _, cond := range cluster.Status.Conditions { if strings.Contains(cond.Message, "backup for horizontalScaling failed") { - g.Expect(cond.Message).Should(ContainSubstring("backup for horizontalScaling failed")) err = errors.New("has backup error") break } } - if err == nil { - // this expectation is intended to print all cluster.Status.Conditions - 
g.Expect(cluster.Status.Conditions).Should(BeEmpty()) - } g.Expect(err).Should(HaveOccurred()) })).Should(Succeed()) @@ -1948,7 +1922,7 @@ var _ = Describe("Cluster Controller", func() { Namespace: clusterKey.Namespace, Name: getPVCName(testapps.DataVolumeName, compName, i), } - createPVC(clusterKey.Name, pvcKey.Name, compName, "") + createPVC(clusterKey.Name, pvcKey.Name, compName, "", "") Eventually(testapps.CheckObjExists(&testCtx, pvcKey, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) Expect(testapps.GetAndChangeObjStatus(&testCtx, pvcKey, func(pvc *corev1.PersistentVolumeClaim) { pvc.Status.Phase = corev1.ClaimBound @@ -1956,7 +1930,7 @@ var _ = Describe("Cluster Controller", func() { } } - By("delete the cluster and should preserved PVC,Secret,CM resources") + By("delete the cluster and should be preserved PVC,Secret,CM resources") deleteCluster := func(termPolicy appsv1alpha1.TerminationPolicyType) { // TODO: would be better that cluster is created with terminationPolicy=Halt instead of // reassign the value after created @@ -2024,7 +1998,7 @@ var _ = Describe("Cluster Controller", func() { return i.UID == j.UID })).Should(BeTrue()) - By("delete the cluster and should preserved PVC,Secret,CM resources but result updated the new last applied cluster UID") + By("delete the cluster and should be preserved PVC,Secret,CM resources but result updated the new last applied cluster UID") deleteCluster(appsv1alpha1.Halt) checkPreservedObjects(clusterObj.UID) }) @@ -2044,9 +2018,11 @@ var _ = Describe("Cluster Controller", func() { When("creating cluster with backup configuration", func() { const ( - compName = statefulCompName - compDefName = statefulCompDefName - backupRepoName = "test-backup-repo" + compName = statefulCompName + compDefName = statefulCompDefName + backupRepoName = "test-backup-repo" + backupMethodName = "test-backup-method" + volumeSnapshotBackupMethodName = "test-vs-backup-method" ) BeforeEach(func() { cleanEnv() @@ -2081,8 +2057,8 @@ 
var _ = Describe("Cluster Controller", func() { int64Ptr = func(in int64) *int64 { return &in } - strPtr = func(s string) *string { - return &s + retention = func(s string) dpv1alpha1.RetentionPeriod { + return dpv1alpha1.RetentionPeriod(s) } ) @@ -2094,8 +2070,8 @@ var _ = Describe("Cluster Controller", func() { desc: "backup with snapshot method", backup: &appsv1alpha1.ClusterBackup{ Enabled: &boolTrue, - RetentionPeriod: strPtr("1d"), - Method: dataprotectionv1alpha1.BackupMethodSnapshot, + RetentionPeriod: retention("1d"), + Method: vsBackupMethodName, CronExpression: "*/1 * * * *", StartingDeadlineMinutes: int64Ptr(int64(10)), PITREnabled: &boolTrue, @@ -2106,8 +2082,8 @@ var _ = Describe("Cluster Controller", func() { desc: "disable backup", backup: &appsv1alpha1.ClusterBackup{ Enabled: &boolFalse, - RetentionPeriod: strPtr("1d"), - Method: dataprotectionv1alpha1.BackupMethodSnapshot, + RetentionPeriod: retention("1d"), + Method: vsBackupMethodName, CronExpression: "*/1 * * * *", StartingDeadlineMinutes: int64Ptr(int64(10)), PITREnabled: &boolTrue, @@ -2115,11 +2091,11 @@ var _ = Describe("Cluster Controller", func() { }, }, { - desc: "backup with backup tool method", + desc: "backup with backup tool", backup: &appsv1alpha1.ClusterBackup{ Enabled: &boolTrue, - RetentionPeriod: strPtr("2d"), - Method: dataprotectionv1alpha1.BackupMethodBackupTool, + RetentionPeriod: retention("2d"), + Method: backupMethodName, CronExpression: "*/1 * * * *", StartingDeadlineMinutes: int64Ptr(int64(10)), RepoName: backupRepoName, @@ -2136,47 +2112,37 @@ var _ = Describe("Cluster Controller", func() { By(t.desc) backup := t.backup createClusterWithBackup(backup) - checkSchedulePolicy := func(g Gomega, sp *dataprotectionv1alpha1.SchedulePolicy) { - g.Expect(sp).ShouldNot(BeNil()) - g.Expect(sp.Enable).Should(BeEquivalentTo(*backup.Enabled)) - g.Expect(sp.CronExpression).Should(Equal(backup.CronExpression)) - } - checkPolicy := func(g Gomega, p *dataprotectionv1alpha1.BackupPolicy) 
{ - schedule := p.Spec.Schedule - switch backup.Method { - case dataprotectionv1alpha1.BackupMethodSnapshot: - checkSchedulePolicy(g, schedule.Snapshot) - case dataprotectionv1alpha1.BackupMethodBackupTool: - checkSchedulePolicy(g, schedule.Datafile) + + checkSchedule := func(g Gomega, schedule *dpv1alpha1.BackupSchedule) { + var policy *dpv1alpha1.SchedulePolicy + for i, s := range schedule.Spec.Schedules { + if s.BackupMethod == backup.Method { + Expect(*s.Enabled).Should(BeEquivalentTo(*backup.Enabled)) + policy = &schedule.Spec.Schedules[i] + } } - g.Expect(schedule.Logfile.Enable).Should(BeEquivalentTo(*backup.PITREnabled)) - g.Expect(*p.Spec.Logfile.BackupRepoName).Should(BeEquivalentTo(backup.RepoName)) - g.Expect(schedule.StartingDeadlineMinutes).Should(Equal(backup.StartingDeadlineMinutes)) - } - checkPolicyDisabled := func(g Gomega, p *dataprotectionv1alpha1.BackupPolicy) { - schedule := p.Spec.Schedule - switch backup.Method { - case dataprotectionv1alpha1.BackupMethodSnapshot: - g.Expect(schedule.Snapshot.Enable).Should(BeFalse()) - case dataprotectionv1alpha1.BackupMethodBackupTool: - g.Expect(schedule.Datafile.Enable).Should(BeFalse()) + if backup.Enabled != nil && *backup.Enabled { + Expect(policy).ShouldNot(BeNil()) + Expect(policy.RetentionPeriod).Should(BeEquivalentTo(backup.RetentionPeriod)) + Expect(policy.CronExpression).Should(BeEquivalentTo(backup.CronExpression)) } } - policyName := DeriveBackupPolicyName(clusterKey.Name, compDefName, "") - Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: policyName, Namespace: clusterKey.Namespace}, - func(g Gomega, policy *dataprotectionv1alpha1.BackupPolicy) { - if backup == nil { - // if cluster.Spec.Backup is nil, will use the default backup policy - g.Expect(policy).ShouldNot(BeNil()) - g.Expect(policy.Spec.Schedule).ShouldNot(BeNil()) - g.Expect(policy.Spec.Schedule.Snapshot).ShouldNot(BeNil()) - g.Expect(policy.Spec.Schedule.Snapshot.Enable).Should(BeFalse()) - } else if 
boolValue(backup.Enabled) { - checkPolicy(g, policy) - } else { - checkPolicyDisabled(g, policy) - } - })).Should(Succeed()) + + By("checking backup policy exists") + backupPolicyName := generateBackupPolicyName(clusterKey.Name, compDefName, "") + backupPolicyKey := client.ObjectKey{Name: backupPolicyName, Namespace: clusterKey.Namespace} + backupPolicy := &dpv1alpha1.BackupPolicy{} + Eventually(testapps.CheckObjExists(&testCtx, backupPolicyKey, backupPolicy, true)).Should(Succeed()) + + By("checking backup schedule") + backupScheduleName := generateBackupScheduleName(clusterKey.Name, compDefName, "") + backupScheduleKey := client.ObjectKey{Name: backupScheduleName, Namespace: clusterKey.Namespace} + if backup == nil { + Eventually(testapps.CheckObjExists(&testCtx, backupScheduleKey, + &dpv1alpha1.BackupSchedule{}, false)).Should(Succeed()) + continue + } + Eventually(testapps.CheckObj(&testCtx, backupScheduleKey, checkSchedule)).Should(Succeed()) } }) }) @@ -2345,53 +2311,47 @@ var _ = Describe("Cluster Controller", func() { By("mock backuptool object") backupPolicyName := "test-backup-policy" backupName := "test-backup" - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dataprotectionv1alpha1.BackupTool{}, testapps.RandomizedObjName()) + _ = testapps.CreateCustomizedObj(&testCtx, "backup/actionset.yaml", + &dpv1alpha1.ActionSet{}, testapps.RandomizedObjName()) By("creating backup") - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). + backup := testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName). SetBackupPolicyName(backupPolicyName). - SetBackupType(dataprotectionv1alpha1.BackupTypeDataFile). + SetBackupMethod(testdp.BackupMethodName). 
Create(&testCtx).GetObject() By("mocking backup status completed, we don't need backup reconcile here") - Eventually(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(backup), func(backup *dataprotectionv1alpha1.Backup) { - backup.Status.BackupToolName = backupTool.Name + Eventually(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(backup), func(backup *dpv1alpha1.Backup) { backup.Status.PersistentVolumeClaimName = "backup-pvc" - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted + backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted + testdp.MockBackupStatusMethod(backup, testapps.DataVolumeName) })).Should(Succeed()) - By("checking backup status completed") - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), - func(g Gomega, tmpBackup *dataprotectionv1alpha1.Backup) { - g.Expect(tmpBackup.Status.Phase).Should(Equal(dataprotectionv1alpha1.BackupCompleted)) - })).Should(Succeed()) - By("creating cluster with backup") - restoreFromBackup := fmt.Sprintf(`{"%s":"%s"}`, compName, backupName) + restoreFromBackup := fmt.Sprintf(`{"%s":{"name":"%s"}}`, compName, backupName) pvcSpec := testapps.NewPVCSpec("1Gi") + replicas := 3 clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). AddComponent(compName, compDefName). - SetReplicas(3). + SetReplicas(int32(replicas)). AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec). 
- AddAnnotations(constant.RestoreFromBackUpAnnotationKey, restoreFromBackup).Create(&testCtx).GetObject() + AddAnnotations(constant.RestoreFromBackupAnnotationKey, restoreFromBackup).Create(&testCtx).GetObject() clusterKey = client.ObjectKeyFromObject(clusterObj) - By("mocking restore job completed") - patchK8sJobStatus := func(key types.NamespacedName, jobStatus batchv1.JobConditionType) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(fetched *batchv1.Job) { - jobCondition := batchv1.JobCondition{Type: jobStatus} - fetched.Status.Conditions = append(fetched.Status.Conditions, jobCondition) - })).Should(Succeed()) - } - for i := 0; i < 3; i++ { - restoreJobKey := client.ObjectKey{ - Name: fmt.Sprintf("base-%s-%s-%s-%d", testapps.DataVolumeName, clusterObj.Name, compName, i), - Namespace: clusterKey.Namespace, - } - patchK8sJobStatus(restoreJobKey, batchv1.JobComplete) + // mock pvcs have restored + mockComponentPVCsAndBound(clusterObj.Spec.GetComponentByName(compName), replicas, true, testk8s.DefaultStorageClassName) + By("wait for restore created") + ml := client.MatchingLabels{ + constant.AppInstanceLabelKey: clusterKey.Name, + constant.KBAppComponentLabelKey: compName, } + Eventually(testapps.List(&testCtx, generics.RestoreSignature, + ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(1)) + + By("Mocking restore phase to Completed") + // mock prepareData restore completed + mockRestoreCompleted(ml) By("Waiting for the cluster controller to create resources completely") waitForCreatingResourceCompletely(clusterKey, compName) @@ -2417,14 +2377,11 @@ var _ = Describe("Cluster Controller", func() { })).Should(Succeed()) By("clean up annotations after cluster running") - Expect(testapps.GetAndChangeObjStatus(&testCtx, clusterKey, func(tmpCluster *appsv1alpha1.Cluster) { - compStatus := tmpCluster.Status.Components[compName] - compStatus.Phase = appsv1alpha1.RunningClusterCompPhase - tmpCluster.Status.Components[compName] = compStatus - 
})()).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, tmpCluster *appsv1alpha1.Cluster) { g.Expect(tmpCluster.Status.Phase).Should(Equal(appsv1alpha1.RunningClusterPhase)) - g.Expect(tmpCluster.Annotations[constant.RestoreFromBackUpAnnotationKey]).Should(BeEmpty()) + // mock postReady restore completed + mockRestoreCompleted(ml) + g.Expect(tmpCluster.Annotations[constant.RestoreFromBackupAnnotationKey]).Should(BeEmpty()) })).Should(Succeed()) }) }) @@ -2633,9 +2590,13 @@ func createBackupPolicyTpl(clusterDefObj *appsv1alpha1.ClusterDefinition) { AddLabels(constant.ClusterDefLabelKey, clusterDefObj.Name). SetClusterDefRef(clusterDefObj.Name) for _, v := range clusterDefObj.Spec.ComponentDefs { - bpt = bpt.AddBackupPolicy(v.Name).AddSnapshotPolicy().SetSchedule("0 0 * * *", false) - bpt = bpt.AddDatafilePolicy().SetSchedule("0 0 * * *", false) - bpt = bpt.AddIncrementalPolicy().SetSchedule("0 0 * * *", false) + bpt = bpt.AddBackupPolicy(v.Name). + AddBackupMethod(backupMethodName, false, actionSetName). + SetBackupMethodVolumeMounts("data", "/data"). + AddBackupMethod(vsBackupMethodName, true, vsActionSetName). + SetBackupMethodVolumes([]string{"data"}). + AddSchedule(backupMethodName, "0 0 * * *", true). 
+ AddSchedule(vsBackupMethodName, "0 0 * * *", true) switch v.WorkloadType { case appsv1alpha1.Consensus: bpt.SetTargetRole("leader") @@ -2664,3 +2625,28 @@ func outOfOrderEqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) boo } return true } + +func mockRestoreCompleted(ml client.MatchingLabels) { + restoreList := dpv1alpha1.RestoreList{} + Expect(testCtx.Cli.List(testCtx.Ctx, &restoreList, ml)).Should(Succeed()) + for _, rs := range restoreList.Items { + err := testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(&rs), func(res *dpv1alpha1.Restore) { + res.Status.Phase = dpv1alpha1.RestorePhaseCompleted + })() + Expect(client.IgnoreNotFound(err)).ShouldNot(HaveOccurred()) + } +} + +func checkRestoreAndSetCompleted(clusterKey types.NamespacedName, compName string, scaleOutReplicas int) { + By("Checking restore CR created") + ml := client.MatchingLabels{ + constant.AppInstanceLabelKey: clusterKey.Name, + constant.KBAppComponentLabelKey: compName, + constant.KBManagedByKey: "cluster", + } + Eventually(testapps.List(&testCtx, generics.RestoreSignature, + ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(scaleOutReplicas)) + + By("Mocking restore phase to succeeded") + mockRestoreCompleted(ml) +} diff --git a/controllers/apps/cluster_plan_builder_test.go b/controllers/apps/cluster_plan_builder_test.go index 468b4cc208a..2e7fc946b3d 100644 --- a/controllers/apps/cluster_plan_builder_test.go +++ b/controllers/apps/cluster_plan_builder_test.go @@ -62,7 +62,7 @@ var _ = Describe("cluster plan builder test", func() { testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.VolumeSnapshotSignature, true, inNS) // non-namespaced testapps.ClearResources(&testCtx, generics.BackupPolicyTemplateSignature, ml) - testapps.ClearResources(&testCtx, generics.BackupToolSignature, ml) + testapps.ClearResources(&testCtx, generics.ActionSetSignature, ml) testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) } diff --git 
a/controllers/apps/cluster_status_event_handler_test.go b/controllers/apps/cluster_status_event_handler_test.go index dc3acf463ef..7ee30195921 100644 --- a/controllers/apps/cluster_status_event_handler_test.go +++ b/controllers/apps/cluster_status_event_handler_test.go @@ -113,7 +113,7 @@ var _ = Describe("test cluster Failed/Abnormal phase", func() { // AddAppInstanceLabel(clusterName). // AddAppComponentLabel(componentName). // AddRoleLabel(podRole). - // AddAppManangedByLabel(). + // AddAppManagedByLabel(). // AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). // Create(&testCtx).GetObject() // } diff --git a/controllers/apps/components/component.go b/controllers/apps/components/component.go index 2c4c84eca14..1469cab519b 100644 --- a/controllers/apps/components/component.go +++ b/controllers/apps/components/component.go @@ -729,8 +729,8 @@ func (c *rsmComponent) isScaleOutFailed(reqCtx intctrlutil.RequestCtx, cli clien } else if status == backupStatusFailed { return true, nil } - for _, name := range d.pvcKeysToRestore() { - if status, err := d.checkRestoreStatus(name); err != nil { + for i := *c.runningWorkload.Spec.Replicas; i < c.component.Replicas; i++ { + if status, err := d.checkRestoreStatus(i); err != nil { return false, err } else if status == backupStatusFailed { return true, nil diff --git a/controllers/apps/components/hscale_volume_populator.go b/controllers/apps/components/hscale_volume_populator.go index 2fb3446284c..c1f72256424 100644 --- a/controllers/apps/components/hscale_volume_populator.go +++ b/controllers/apps/components/hscale_volume_populator.go @@ -25,7 +25,6 @@ import ( snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -33,7 +32,7 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/factory" @@ -51,9 +50,8 @@ type dataClone interface { checkBackupStatus() (backupStatus, error) backup() ([]client.Object, error) - pvcKeysToRestore() []types.NamespacedName - checkRestoreStatus(types.NamespacedName) (backupStatus, error) - restore(name types.NamespacedName) ([]client.Object, error) + checkRestoreStatus(startingIndex int32) (backupStatus, error) + restore(startingIndex int32) ([]client.Object, error) } type backupStatus string @@ -89,23 +87,6 @@ func newDataClone(reqCtx intctrlutil.RequestCtx, }, nil } if component.HorizontalScalePolicy.Type == appsv1alpha1.HScaleDataClonePolicyCloneVolume { - volumeSnapshotEnabled, err := isVolumeSnapshotEnabled(reqCtx.Ctx, cli, stsObj, backupVCT(component)) - if err != nil { - return nil, err - } - if volumeSnapshotEnabled { - return &snapshotDataClone{ - baseDataClone{ - reqCtx: reqCtx, - cli: cli, - cluster: cluster, - component: component, - stsObj: stsObj, - stsProto: stsProto, - key: key, - }, - }, nil - } return &backupDataClone{ baseDataClone{ reqCtx: reqCtx, @@ -158,17 +139,15 @@ func (d *baseDataClone) cloneData(realDataClone dataClone) ([]client.Object, err panic(fmt.Sprintf("unexpected backup status: %s, clustre: %s, component: %s", status, d.cluster.Name, d.component.Name)) } - // backup's ready, then start to check restore - for _, pvcKey := range d.pvcKeysToRestore() { - restoreStatus, err := realDataClone.checkRestoreStatus(pvcKey) + for i := *d.stsObj.Spec.Replicas; i < d.component.Replicas; i++ { + restoreStatus, err := realDataClone.checkRestoreStatus(i) if err != 
nil { return nil, err } switch restoreStatus { case backupStatusNotCreated: - - restoreObjs, err := realDataClone.restore(pvcKey) + restoreObjs, err := realDataClone.restore(i) if err != nil { return nil, err } @@ -181,7 +160,6 @@ func (d *baseDataClone) cloneData(realDataClone dataClone) ([]client.Object, err status, d.cluster.Name, d.component.Name)) } } - // create PVCs that do not need to restore pvcObjs, err := d.createPVCs(d.excludeBackupVCTs()) if err != nil { @@ -244,19 +222,6 @@ func (d *baseDataClone) excludeBackupVCTs() []*corev1.PersistentVolumeClaimTempl return vcts } -func (d *baseDataClone) pvcKeysToRestore() []types.NamespacedName { - var pvcKeys []types.NamespacedName - backupVct := d.backupVCT() - for i := *d.stsObj.Spec.Replicas; i < d.component.Replicas; i++ { - pvcKey := types.NamespacedName{ - Namespace: d.stsObj.Namespace, - Name: fmt.Sprintf("%s-%s-%d", backupVct.Name, d.stsObj.Name, i), - } - pvcKeys = append(pvcKeys, pvcKey) - } - return pvcKeys -} - func (d *baseDataClone) createPVCs(vcts []*corev1.PersistentVolumeClaimTemplate) ([]client.Object, error) { objs := make([]client.Object, 0) for i := *d.stsObj.Spec.Replicas; i < d.component.Replicas; i++ { @@ -277,8 +242,8 @@ func (d *baseDataClone) createPVCs(vcts []*corev1.PersistentVolumeClaimTemplate) return objs, nil } -func (d *baseDataClone) getBackupMatchingLabels() client.MatchingLabels { - return client.MatchingLabels{ +func (d *baseDataClone) getBRLabels() map[string]string { + return map[string]string{ constant.AppInstanceLabelKey: d.cluster.Name, constant.KBAppComponentLabelKey: d.component.Name, constant.KBManagedByKey: "cluster", // the resources are managed by which controller @@ -311,217 +276,14 @@ func (d *dummyDataClone) backup() ([]client.Object, error) { panic("runtime error: dummyDataClone.backup called") } -func (d *dummyDataClone) checkRestoreStatus(types.NamespacedName) (backupStatus, error) { +func (d *dummyDataClone) checkRestoreStatus(startingIndex int32) 
(backupStatus, error) { return backupStatusReadyToUse, nil } -func (d *dummyDataClone) restore(name types.NamespacedName) ([]client.Object, error) { +func (d *dummyDataClone) restore(startingIndex int32) ([]client.Object, error) { panic("runtime error: dummyDataClone.restore called") } -type snapshotDataClone struct { - baseDataClone -} - -var _ dataClone = &snapshotDataClone{} - -func (d *snapshotDataClone) succeed() (bool, error) { - if len(d.component.VolumeClaimTemplates) == 0 { - d.reqCtx.Recorder.Eventf(d.cluster, - corev1.EventTypeNormal, - "HorizontalScale", - "no VolumeClaimTemplates, no need to do data clone.") - return true, nil - } - return d.checkAllPVCsExist() -} - -func (d *snapshotDataClone) clearTmpResources() ([]client.Object, error) { - allPVCBound, err := d.isAllPVCBound() - if err != nil { - return nil, err - } - if !allPVCBound { - return nil, nil - } - return d.deleteSnapshot() -} - -func (d *snapshotDataClone) backup() ([]client.Object, error) { - objs := make([]client.Object, 0) - backupPolicyTplName := d.component.HorizontalScalePolicy.BackupPolicyTemplateName - - backupPolicyTemplate := &appsv1alpha1.BackupPolicyTemplate{} - err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backupPolicyTplName}, backupPolicyTemplate) - if err != nil { - return nil, err - } - - // if there is backuppolicytemplate created by provider - backupPolicy, err := getBackupPolicyFromTemplate(d.reqCtx, d.cli, d.cluster, d.component.ComponentDef, backupPolicyTplName) - if err != nil { - return nil, err - } - if backupPolicy == nil { - return nil, intctrlutil.NewNotFound("not found any backup policy created by %s", backupPolicyTplName) - } - backup := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "snapshot") - objs = append(objs, backup) - d.reqCtx.Recorder.Eventf(d.cluster, corev1.EventTypeNormal, "BackupJobCreate", "Create backupJob/%s", d.key.Name) - return objs, nil -} - -func (d *snapshotDataClone) checkBackupStatus() (backupStatus, 
error) { - backupPolicyTplName := d.component.HorizontalScalePolicy.BackupPolicyTemplateName - backupPolicyTemplate := &appsv1alpha1.BackupPolicyTemplate{} - err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backupPolicyTplName}, backupPolicyTemplate) - if err != nil { - return backupStatusFailed, err - } - backup := dataprotectionv1alpha1.Backup{} - if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { - if errors.IsNotFound(err) { - return backupStatusNotCreated, nil - } else { - return backupStatusFailed, err - } - } - if backup.Status.Phase == dataprotectionv1alpha1.BackupFailed { - return backupStatusFailed, intctrlutil.NewErrorf(intctrlutil.ErrorTypeBackupFailed, "backup for horizontalScaling failed: %s", - backup.Status.FailureReason) - } - if backup.Status.Phase != dataprotectionv1alpha1.BackupCompleted { - return backupStatusProcessing, nil - } - return backupStatusReadyToUse, nil -} - -func (d *snapshotDataClone) restore(pvcKey types.NamespacedName) ([]client.Object, error) { - objs := make([]client.Object, 0) - vct := d.backupVCT() - // create pvc from snapshot for every new pod - if pvc, err := d.checkedCreatePVCFromSnapshot( - pvcKey, - vct); err != nil { - d.reqCtx.Log.Error(err, "checkedCreatePVCFromSnapshot failed") - return nil, err - } else if pvc != nil { - objs = append(objs, pvc) - } - return objs, nil -} - -func (d *snapshotDataClone) checkRestoreStatus(pvcKey types.NamespacedName) (backupStatus, error) { - pvc := corev1.PersistentVolumeClaim{} - if err := d.cli.Get(d.reqCtx.Ctx, pvcKey, &pvc); err != nil { - if errors.IsNotFound(err) { - return backupStatusNotCreated, nil - } - return backupStatusFailed, err - } - return backupStatusReadyToUse, nil -} - -func (d *snapshotDataClone) listVolumeSnapshotByLabels(vsList *snapshotv1.VolumeSnapshotList, ml client.MatchingLabels) error { - compatClient := intctrlutil.VolumeSnapshotCompatClient{ReadonlyClient: d.cli, Ctx: d.reqCtx.Ctx} - // get vs from backup. 
- backupList := dataprotectionv1alpha1.BackupList{} - if err := d.cli.List(d.reqCtx.Ctx, &backupList, client.InNamespace(d.cluster.Namespace), ml); err != nil { - return err - } else if len(backupList.Items) == 0 { - // ignore not found - return nil - } - return compatClient.List(vsList, client.MatchingLabels{ - constant.DataProtectionLabelBackupNameKey: backupList.Items[0].Name, - }) -} - -func (d *snapshotDataClone) checkedCreatePVCFromSnapshot(pvcKey types.NamespacedName, - vct *corev1.PersistentVolumeClaimTemplate) (client.Object, error) { - pvc := corev1.PersistentVolumeClaim{} - // check pvc existence - if err := d.cli.Get(d.reqCtx.Ctx, pvcKey, &pvc); err != nil { - if !errors.IsNotFound(err) { - return nil, err - } - ml := d.getBackupMatchingLabels() - vsList := snapshotv1.VolumeSnapshotList{} - if err = d.listVolumeSnapshotByLabels(&vsList, ml); err != nil { - return nil, err - } - if len(vsList.Items) == 0 { - return nil, fmt.Errorf("volumesnapshot not found for cluster %s component %s", d.cluster.Name, d.component.Name) - } - // exclude volumes that are deleting - vsName := "" - for _, vs := range vsList.Items { - if vs.DeletionTimestamp != nil { - continue - } - vsName = vs.Name - break - } - return d.createPVCFromSnapshot(vct, pvcKey, vsName) - } - return nil, nil -} - -func (d *snapshotDataClone) createPVCFromSnapshot( - vct *corev1.PersistentVolumeClaimTemplate, - pvcKey types.NamespacedName, - snapshotName string) (client.Object, error) { - pvc := factory.BuildPVC(d.cluster, d.component, vct, pvcKey, snapshotName) - return pvc, nil -} - -func (d *snapshotDataClone) deleteSnapshot() ([]client.Object, error) { - objs, err := d.deleteBackup() - if err != nil { - return nil, err - } - if len(objs) > 0 { - d.reqCtx.Recorder.Eventf(d.cluster, corev1.EventTypeNormal, "BackupJobDelete", "Delete backupJob/%s", d.key.Name) - } - - return objs, nil -} - -// deleteBackup will delete all backup related resources created during horizontal scaling -func (d 
*snapshotDataClone) deleteBackup() ([]client.Object, error) { - ml := d.getBackupMatchingLabels() - backupList := dataprotectionv1alpha1.BackupList{} - if err := d.cli.List(d.reqCtx.Ctx, &backupList, client.InNamespace(d.cluster.Namespace), ml); err != nil { - return nil, err - } - objs := make([]client.Object, 0) - for i := range backupList.Items { - objs = append(objs, &backupList.Items[i]) - } - return objs, nil -} - -func (d *snapshotDataClone) isAllPVCBound() (bool, error) { - if len(d.stsObj.Spec.VolumeClaimTemplates) == 0 { - return true, nil - } - for i := 0; i < int(d.component.Replicas); i++ { - pvcKey := types.NamespacedName{ - Namespace: d.stsObj.Namespace, - Name: fmt.Sprintf("%s-%s-%d", d.stsObj.Spec.VolumeClaimTemplates[0].Name, d.stsObj.Name, i), - } - pvc := corev1.PersistentVolumeClaim{} - // check pvc existence - if err := d.cli.Get(d.reqCtx.Ctx, pvcKey, &pvc); err != nil { - return false, client.IgnoreNotFound(err) - } - if pvc.Status.Phase != corev1.ClaimBound { - return false, nil - } - } - return true, nil -} - type backupDataClone struct { baseDataClone } @@ -540,8 +302,8 @@ func (d *backupDataClone) succeed() (bool, error) { if err != nil || !allPVCsExist { return allPVCsExist, err } - for _, pvcKey := range d.pvcKeysToRestore() { - restoreStatus, err := d.checkRestoreStatus(pvcKey) + for i := *d.stsObj.Spec.Replicas; i < d.component.Replicas; i++ { + restoreStatus, err := d.checkRestoreStatus(i) if err != nil { return false, err } @@ -555,21 +317,20 @@ func (d *backupDataClone) succeed() (bool, error) { func (d *backupDataClone) clearTmpResources() ([]client.Object, error) { objs := make([]client.Object, 0) // delete backup - ml := d.getBackupMatchingLabels() - backupList := dataprotectionv1alpha1.BackupList{} - if err := d.cli.List(d.reqCtx.Ctx, &backupList, client.InNamespace(d.cluster.Namespace), ml); err != nil { + brLabels := d.getBRLabels() + backupList := dpv1alpha1.BackupList{} + if err := d.cli.List(d.reqCtx.Ctx, &backupList, 
client.InNamespace(d.cluster.Namespace), client.MatchingLabels(brLabels)); err != nil { return nil, err } for i := range backupList.Items { objs = append(objs, &backupList.Items[i]) } - // delete restore job - jobList := v1.JobList{} - if err := d.cli.List(d.reqCtx.Ctx, &jobList, client.InNamespace(d.cluster.Namespace), ml); err != nil { + restoreList := dpv1alpha1.RestoreList{} + if err := d.cli.List(d.reqCtx.Ctx, &restoreList, client.InNamespace(d.cluster.Namespace), client.MatchingLabels(brLabels)); err != nil { return nil, err } - for i := range jobList.Items { - objs = append(objs, &jobList.Items[i]) + for i := range restoreList.Items { + objs = append(objs, &restoreList.Items[i]) } return objs, nil } @@ -584,13 +345,23 @@ func (d *backupDataClone) backup() ([]client.Object, error) { if backupPolicy == nil { return nil, intctrlutil.NewNotFound("not found any backup policy created by %s", backupPolicyTplName) } - backup := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, "datafile") + volumeSnapshotEnabled, err := isVolumeSnapshotEnabled(d.reqCtx.Ctx, d.cli, d.stsObj, backupVCT(d.component)) + if err != nil { + return nil, err + } + backupMethods := getBackupMethods(backupPolicy, volumeSnapshotEnabled) + if len(backupMethods) == 0 { + return nil, fmt.Errorf("no backup method found in backup policy %s", backupPolicy.Name) + } else if len(backupMethods) > 1 { + return nil, fmt.Errorf("more than one backup methods found in backup policy %s", backupPolicy.Name) + } + backup := factory.BuildBackup(d.cluster, d.component, backupPolicy.Name, d.key, backupMethods[0]) objs = append(objs, backup) return objs, nil } func (d *backupDataClone) checkBackupStatus() (backupStatus, error) { - backup := dataprotectionv1alpha1.Backup{} + backup := dpv1alpha1.Backup{} if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { if errors.IsNotFound(err) { return backupStatusNotCreated, nil @@ -598,48 +369,37 @@ func (d *backupDataClone) checkBackupStatus() 
(backupStatus, error) { return backupStatusFailed, err } } - if backup.Status.Phase == dataprotectionv1alpha1.BackupFailed { - return backupStatusFailed, fmt.Errorf("failed to backup: %s", backup.Status.FailureReason) + if backup.Status.Phase == dpv1alpha1.BackupPhaseFailed { + return backupStatusFailed, intctrlutil.NewErrorf(intctrlutil.ErrorTypeBackupFailed, "backup for horizontalScaling failed: %s", + backup.Status.FailureReason) } - if backup.Status.Phase == dataprotectionv1alpha1.BackupCompleted { + if backup.Status.Phase == dpv1alpha1.BackupPhaseCompleted { return backupStatusReadyToUse, nil } return backupStatusProcessing, nil } -func (d *backupDataClone) restore(pvcKey types.NamespacedName) ([]client.Object, error) { - objs := make([]client.Object, 0) - backup := dataprotectionv1alpha1.Backup{} - if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { +func (d *backupDataClone) restore(startingIndex int32) ([]client.Object, error) { + backup := &dpv1alpha1.Backup{} + if err := d.cli.Get(d.reqCtx.Ctx, d.key, backup); err != nil { return nil, err } - pvc := factory.BuildPVC(d.cluster, d.component, d.backupVCT(), pvcKey, "") - objs = append(objs, pvc) - backupTool := &dataprotectionv1alpha1.BackupTool{} - if err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backup.Status.BackupToolName}, backupTool); err != nil { + restoreMGR := plan.NewRestoreManager(d.reqCtx.Ctx, d.cli, d.cluster, nil, d.getBRLabels(), int32(1), startingIndex) + restore, err := restoreMGR.BuildPrepareDataRestore(d.component, backup) + if err != nil || restore == nil { return nil, err } - restoreMgr := plan.NewRestoreManager(d.reqCtx.Ctx, d.cli, d.cluster, nil) - restoreJobs, err := restoreMgr.BuildDatafileRestoreJobByPVCS(d.baseDataClone.component, &backup, backupTool, []string{pvc.Name}, d.getBackupMatchingLabels()) - if err != nil { - return nil, err - } - objs = append(objs, restoreJobs...) 
- return objs, nil + return []client.Object{restore}, nil } -func (d *backupDataClone) checkRestoreStatus(pvcKey types.NamespacedName) (backupStatus, error) { - job := v1.Job{} - restoreMgr := plan.NewRestoreManager(d.reqCtx.Ctx, d.cli, d.cluster, nil) - jobName := restoreMgr.GetDatafileRestoreJobName(pvcKey.Name) - if err := d.cli.Get(d.reqCtx.Ctx, types.NamespacedName{Namespace: pvcKey.Namespace, Name: jobName}, &job); err != nil { - if errors.IsNotFound(err) { - return backupStatusNotCreated, nil - } else { - return backupStatusNotCreated, err - } +func (d *backupDataClone) checkRestoreStatus(startingIndex int32) (backupStatus, error) { + restoreMGR := plan.NewRestoreManager(d.reqCtx.Ctx, d.cli, d.cluster, nil, d.getBRLabels(), int32(1), startingIndex) + restoreMeta := restoreMGR.GetRestoreObjectMeta(d.component, dpv1alpha1.PrepareData) + restore := &dpv1alpha1.Restore{} + if err := d.cli.Get(d.reqCtx.Ctx, types.NamespacedName{Namespace: d.cluster.Namespace, Name: restoreMeta.Name}, restore); err != nil { + return backupStatusNotCreated, client.IgnoreNotFound(err) } - if job.Status.Succeeded == 1 { + if restore.Status.Phase == dpv1alpha1.RestorePhaseCompleted { return backupStatusReadyToUse, nil } return backupStatusProcessing, nil @@ -649,8 +409,8 @@ func (d *backupDataClone) checkRestoreStatus(pvcKey types.NamespacedName) (backu func getBackupPolicyFromTemplate(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster, - componentDef, backupPolicyTemplateName string) (*dataprotectionv1alpha1.BackupPolicy, error) { - backupPolicyList := &dataprotectionv1alpha1.BackupPolicyList{} + componentDef, backupPolicyTemplateName string) (*dpv1alpha1.BackupPolicy, error) { + backupPolicyList := &dpv1alpha1.BackupPolicyList{} if err := cli.List(reqCtx.Ctx, backupPolicyList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ @@ -716,3 +476,19 @@ func isVolumeSnapshotEnabled(ctx context.Context, cli client.Client, } return false, nil } + 
+func getBackupMethods(backupPolicy *dpv1alpha1.BackupPolicy, useVolumeSnapshot bool) []string { + var vsMethods []string + var otherMethods []string + for _, method := range backupPolicy.Spec.BackupMethods { + if method.SnapshotVolumes != nil && *method.SnapshotVolumes { + vsMethods = append(vsMethods, method.Name) + } else { + otherMethods = append(otherMethods, method.Name) + } + } + if useVolumeSnapshot { + return vsMethods + } + return otherMethods +} diff --git a/controllers/apps/components/utils_test.go b/controllers/apps/components/utils_test.go index 7c1b6ee4857..fd38f183b96 100644 --- a/controllers/apps/components/utils_test.go +++ b/controllers/apps/components/utils_test.go @@ -293,7 +293,7 @@ var _ = Describe("Component utils test", func() { SetOwnerReferences("apps/v1", constant.StatefulSetKind, nil). AddAppInstanceLabel(clusterName). AddAppComponentLabel(compName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). AddRoleLabel(role). AddConsensusSetAccessModeLabel(mode). AddControllerRevisionHashLabel(""). 
diff --git a/controllers/apps/operations/backup.go b/controllers/apps/operations/backup.go index 468a35432a0..f8f05112d38 100644 --- a/controllers/apps/operations/backup.go +++ b/controllers/apps/operations/backup.go @@ -28,9 +28,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) const backupTimeLayout = "20060102150405" @@ -79,7 +80,7 @@ func (b BackupOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli cli cluster := opsRes.Cluster // get backup - backups := &dataprotectionv1alpha1.BackupList{} + backups := &dpv1alpha1.BackupList{} if err := cli.List(reqCtx.Ctx, backups, client.InNamespace(cluster.Namespace), client.MatchingLabels(getBackupLabels(cluster.Name, opsRequest.Name))); err != nil { return appsv1alpha1.OpsFailedPhase, 0, err } @@ -89,9 +90,9 @@ func (b BackupOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli cli } // check backup status phase := backups.Items[0].Status.Phase - if phase == dataprotectionv1alpha1.BackupCompleted { + if phase == dpv1alpha1.BackupPhaseCompleted { return appsv1alpha1.OpsSucceedPhase, 0, nil - } else if phase == dataprotectionv1alpha1.BackupFailed { + } else if phase == dpv1alpha1.BackupPhaseFailed { return appsv1alpha1.OpsFailedPhase, 0, fmt.Errorf("backup failed") } return appsv1alpha1.OpsRunningPhase, 0, nil @@ -102,14 +103,12 @@ func (b BackupOpsHandler) SaveLastConfiguration(reqCtx intctrlutil.RequestCtx, c return nil } -func buildBackup(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRequest *appsv1alpha1.OpsRequest, cluster *appsv1alpha1.Cluster) 
(*dataprotectionv1alpha1.Backup, error) { +func buildBackup(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRequest *appsv1alpha1.OpsRequest, cluster *appsv1alpha1.Cluster) (*dpv1alpha1.Backup, error) { var err error backupSpec := opsRequest.Spec.BackupSpec if backupSpec == nil { - backupSpec = &appsv1alpha1.BackupSpec{ - BackupType: string(dataprotectionv1alpha1.BackupTypeDataFile), - } + backupSpec = &appsv1alpha1.BackupSpec{} } if len(backupSpec.BackupName) == 0 { @@ -121,15 +120,15 @@ func buildBackup(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRequest *a return nil, err } - backup := &dataprotectionv1alpha1.Backup{ + backup := &dpv1alpha1.Backup{ ObjectMeta: metav1.ObjectMeta{ Name: backupSpec.BackupName, Namespace: cluster.Namespace, Labels: getBackupLabels(cluster.Name, opsRequest.Name), }, - Spec: dataprotectionv1alpha1.BackupSpec{ + Spec: dpv1alpha1.BackupSpec{ BackupPolicyName: backupSpec.BackupPolicyName, - BackupType: dataprotectionv1alpha1.BackupType(backupSpec.BackupType), + BackupMethod: backupSpec.BackupMethod, }, } @@ -142,28 +141,28 @@ func getDefaultBackupPolicy(reqCtx intctrlutil.RequestCtx, cli client.Client, cl return backupPolicy, nil } - backupPolicyList := &dataprotectionv1alpha1.BackupPolicyList{} + backupPolicyList := &dpv1alpha1.BackupPolicyList{} if err := cli.List(reqCtx.Ctx, backupPolicyList, client.InNamespace(cluster.Namespace), client.MatchingLabels(map[string]string{ constant.AppInstanceLabelKey: cluster.Name, })); err != nil { return "", err } - defaultBackupPolicys := &dataprotectionv1alpha1.BackupPolicyList{} + defaultBackupPolices := &dpv1alpha1.BackupPolicyList{} for _, backupPolicy := range backupPolicyList.Items { - if backupPolicy.GetAnnotations()[constant.DefaultBackupPolicyAnnotationKey] == "true" { - defaultBackupPolicys.Items = append(defaultBackupPolicys.Items, backupPolicy) + if backupPolicy.GetAnnotations()[dptypes.DefaultBackupPolicyAnnotationKey] == "true" { + defaultBackupPolices.Items = 
append(defaultBackupPolices.Items, backupPolicy) } } - if len(defaultBackupPolicys.Items) == 0 { + if len(defaultBackupPolices.Items) == 0 { return "", fmt.Errorf(`not found any default backup policy for cluster "%s"`, cluster.Name) } - if len(defaultBackupPolicys.Items) > 1 { + if len(defaultBackupPolices.Items) > 1 { return "", fmt.Errorf(`cluster "%s" has multiple default backup policies`, cluster.Name) } - return defaultBackupPolicys.Items[0].GetName(), nil + return defaultBackupPolices.Items[0].GetName(), nil } func getBackupLabels(cluster, request string) map[string]string { diff --git a/controllers/apps/operations/switchover_test.go b/controllers/apps/operations/switchover_test.go index e81f14c70d0..92f2ef5315a 100644 --- a/controllers/apps/operations/switchover_test.go +++ b/controllers/apps/operations/switchover_test.go @@ -153,7 +153,7 @@ var _ = Describe("", func() { AddContainer(container). AddAppInstanceLabel(clusterObj.Name). AddAppComponentLabel(consensusComp). - AddAppManangedByLabel(). + AddAppManagedByLabel(). SetReplicas(2). Create(&testCtx).GetObject() diff --git a/controllers/apps/operations/switchover_util_test.go b/controllers/apps/operations/switchover_util_test.go index 5a3a90e83cf..e241cfe06fe 100644 --- a/controllers/apps/operations/switchover_util_test.go +++ b/controllers/apps/operations/switchover_util_test.go @@ -98,7 +98,7 @@ var _ = Describe("Switchover Util", func() { AddContainer(container). AddAppInstanceLabel(clusterObj.Name). AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). SetReplicas(2). Create(&testCtx).GetObject() @@ -153,7 +153,7 @@ var _ = Describe("Switchover Util", func() { AddContainer(container). AddAppInstanceLabel(clusterObj.Name). AddAppComponentLabel(testapps.DefaultRedisCompSpecName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). SetReplicas(2). 
Create(&testCtx).GetObject() diff --git a/controllers/apps/operations/util/common_util.go b/controllers/apps/operations/util/common_util.go index c7667b286fa..cb1047e478f 100644 --- a/controllers/apps/operations/util/common_util.go +++ b/controllers/apps/operations/util/common_util.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/constant" ) @@ -100,7 +100,7 @@ func GetOpsRequestSliceFromCluster(cluster *appsv1alpha1.Cluster) ([]appsv1alpha } // GetOpsRequestFromBackup gets OpsRequest slice from cluster annotations. -func GetOpsRequestFromBackup(backup *dataprotectionv1alpha1.Backup) *appsv1alpha1.OpsRecorder { +func GetOpsRequestFromBackup(backup *dpv1alpha1.Backup) *appsv1alpha1.OpsRecorder { var ( opsRequestName string opsRequestType string diff --git a/controllers/apps/operations/util/common_util_test.go b/controllers/apps/operations/util/common_util_test.go index 70edce24c0e..a2e9eaebd3d 100644 --- a/controllers/apps/operations/util/common_util_test.go +++ b/controllers/apps/operations/util/common_util_test.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/constant" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) @@ -97,7 +97,7 @@ var _ = Describe("OpsRequest Controller", func() { It("Should Test Backup OpsRequest", func() { By("test GetOpsRequestFromBackup function") - backup := &dataprotectionv1alpha1.Backup{} + backup := &dpv1alpha1.Backup{} 
backup.Labels = map[string]string{ intctrlutil.OpsRequestNameLabelKey: "backup-ops", intctrlutil.OpsRequestTypeLabelKey: string(appsv1alpha1.BackupType), diff --git a/controllers/apps/opsrequest_controller.go b/controllers/apps/opsrequest_controller.go index 024daf5a7bb..18c3909dfc4 100644 --- a/controllers/apps/opsrequest_controller.go +++ b/controllers/apps/opsrequest_controller.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps/operations" opsutil "github.com/apecloud/kubeblocks/controllers/apps/operations/util" "github.com/apecloud/kubeblocks/internal/constant" @@ -84,7 +84,7 @@ func (r *OpsRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&appsv1alpha1.OpsRequest{}). Watches(&appsv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.parseAllOpsRequest)). - Watches(&dataprotectionv1alpha1.Backup{}, handler.EnqueueRequestsFromMapFunc(r.parseBackupOpsRequest)). + Watches(&dpv1alpha1.Backup{}, handler.EnqueueRequestsFromMapFunc(r.parseBackupOpsRequest)). 
Complete(r) } @@ -306,7 +306,7 @@ func (r *OpsRequestReconciler) parseAllOpsRequest(ctx context.Context, object cl } func (r *OpsRequestReconciler) parseBackupOpsRequest(ctx context.Context, object client.Object) []reconcile.Request { - backup := object.(*dataprotectionv1alpha1.Backup) + backup := object.(*dpv1alpha1.Backup) var ( requests []reconcile.Request ) diff --git a/controllers/apps/opsrequest_controller_test.go b/controllers/apps/opsrequest_controller_test.go index 7f5f3faf18a..6b43a77ea30 100644 --- a/controllers/apps/opsrequest_controller_test.go +++ b/controllers/apps/opsrequest_controller_test.go @@ -37,12 +37,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" opsutil "github.com/apecloud/kubeblocks/controllers/apps/operations/util" "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" testk8s "github.com/apecloud/kubeblocks/internal/testutil/k8s" lorry "github.com/apecloud/kubeblocks/lorry/client" ) @@ -475,7 +477,8 @@ var _ = Describe("OpsRequest Controller", func() { It("HorizontalScaling via volume snapshot backup", func() { By("init backup policy template, mysql cluster and hscale ops") testk8s.MockEnableVolumeSnapshot(&testCtx, testk8s.DefaultStorageClassName) - createMysqlCluster(3) + oldReplicas := int32(3) + createMysqlCluster(oldReplicas) replicas := int32(5) ops := createClusterHscaleOps(replicas) @@ -492,11 +495,12 @@ var _ = Describe("OpsRequest Controller", func() { 
})).Should(Succeed()) By("mock backup status is ready, component phase should change to Updating when component is horizontally scaling.") - backupKey := types.NamespacedName{Name: fmt.Sprintf("%s-%s-scaling", + backupKey := client.ObjectKey{Name: fmt.Sprintf("%s-%s-scaling", clusterKey.Name, mysqlCompName), Namespace: testCtx.DefaultNamespace} - backup := &dataprotectionv1alpha1.Backup{} + backup := &dpv1alpha1.Backup{} Expect(k8sClient.Get(testCtx.Ctx, backupKey, backup)).Should(Succeed()) - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted + backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted + testdp.MockBackupStatusMethod(backup, testapps.DataVolumeName) Expect(k8sClient.Status().Update(testCtx.Ctx, backup)).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, cluster *appsv1alpha1.Cluster) { g.Expect(cluster.Status.Components[mysqlCompName].Phase).Should(Equal(appsv1alpha1.UpdatingClusterCompPhase)) @@ -508,7 +512,7 @@ var _ = Describe("OpsRequest Controller", func() { vs.Name = backupKey.Name vs.Namespace = backupKey.Namespace vs.Labels = map[string]string{ - constant.DataProtectionLabelBackupNameKey: backupKey.Name, + dptypes.DataProtectionLabelBackupNameKey: backupKey.Name, } pvcName := "" vs.Spec = snapshotv1.VolumeSnapshotSpec{ @@ -519,6 +523,35 @@ var _ = Describe("OpsRequest Controller", func() { Expect(k8sClient.Create(testCtx.Ctx, vs)).Should(Succeed()) Eventually(testapps.CheckObjExists(&testCtx, backupKey, vs, true)).Should(Succeed()) + mockComponentPVCsAndBound := func(comp *appsv1alpha1.ClusterComponentSpec) { + for i := 0; i < int(replicas); i++ { + for _, vct := range comp.VolumeClaimTemplates { + pvcKey := types.NamespacedName{ + Namespace: clusterKey.Namespace, + Name: fmt.Sprintf("%s-%s-%s-%d", vct.Name, clusterKey.Name, comp.Name, i), + } + testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcKey.Name, clusterKey.Name, + comp.Name, 
testapps.DataVolumeName).SetStorage(vct.Spec.Resources.Requests.Storage().String()).AddLabelsInMap(map[string]string{ + constant.AppInstanceLabelKey: clusterKey.Name, + constant.KBAppComponentLabelKey: comp.Name, + constant.AppManagedByLabelKey: constant.AppName, + }).CheckedCreate(&testCtx) + Eventually(testapps.GetAndChangeObjStatus(&testCtx, pvcKey, func(pvc *corev1.PersistentVolumeClaim) { + pvc.Status.Phase = corev1.ClaimBound + if pvc.Status.Capacity == nil { + pvc.Status.Capacity = corev1.ResourceList{} + } + pvc.Status.Capacity[corev1.ResourceStorage] = pvc.Spec.Resources.Requests[corev1.ResourceStorage] + })).Should(Succeed()) + } + } + } + + // mock pvcs have restored + mockComponentPVCsAndBound(clusterObj.Spec.GetComponentByName(mysqlCompName)) + // check restore CR and mock it to Completed + checkRestoreAndSetCompleted(clusterKey, mysqlCompName, int(replicas-oldReplicas)) + By("check the underlying workload been updated") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), func(g Gomega, rsm *workloads.ReplicatedStateMachine) { diff --git a/controllers/apps/suite_test.go b/controllers/apps/suite_test.go index e668891f266..64aabda4349 100644 --- a/controllers/apps/suite_test.go +++ b/controllers/apps/suite_test.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps/configuration" "github.com/apecloud/kubeblocks/controllers/k8score" @@ -116,7 +116,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) 
Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) diff --git a/controllers/apps/systemaccount_controller.go b/controllers/apps/systemaccount_controller.go index c9ffb25a0e6..0b32e36077b 100644 --- a/controllers/apps/systemaccount_controller.go +++ b/controllers/apps/systemaccount_controller.go @@ -553,6 +553,6 @@ func (r *SystemAccountReconciler) jobCompletionHandler() *handler.Funcs { // existsOperations checks if the cluster is doing operations func existsOperations(cluster *appsv1alpha1.Cluster) bool { opsRequestMap, _ := opsutil.GetOpsRequestSliceFromCluster(cluster) - _, isRestoring := cluster.Annotations[constant.RestoreFromBackUpAnnotationKey] + _, isRestoring := cluster.Annotations[constant.RestoreFromBackupAnnotationKey] return len(opsRequestMap) > 0 || isRestoring } diff --git a/controllers/apps/transform_restore.go b/controllers/apps/transform_restore.go index 40cfdb61493..fc3ecabe729 100644 --- a/controllers/apps/transform_restore.go +++ b/controllers/apps/transform_restore.go @@ -54,19 +54,18 @@ func (t *RestoreTransformer) Transform(ctx graph.TransformContext, dag *graph.DA return err } for _, spec := range cluster.Spec.ComponentSpecs { + if cluster.Annotations[constant.RestoreFromBackupAnnotationKey] == "" { + continue + } + comp, err := components.NewComponent(reqCtx, t.Client, clusterDef, clusterVer, cluster, spec.Name, nil) if err != nil { return err } syncComp := comp.GetSynthesizedComponent() - if cluster.Annotations[constant.RestoreFromBackUpAnnotationKey] != "" { - if err = plan.DoRestore(reqCtx.Ctx, t.Client, cluster, syncComp, rscheme); err != nil { - return commitError(err) - } - } else if cluster.Annotations[constant.RestoreFromTimeAnnotationKey] != "" { - if err = plan.DoPITR(reqCtx.Ctx, t.Client, cluster, syncComp, rscheme); err != nil { - return commitError(err) - } + restoreMGR := plan.NewRestoreManager(reqCtx.Ctx, t.Client, cluster, rscheme, nil, syncComp.Replicas, 0) + if err = 
restoreMGR.DoRestore(syncComp); err != nil { + return commitError(err) } } return nil diff --git a/controllers/apps/transform_types.go b/controllers/apps/transform_types.go index 655013f6c3c..6cbfe141829 100644 --- a/controllers/apps/transform_types.go +++ b/controllers/apps/transform_types.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" client2 "github.com/apecloud/kubeblocks/internal/controller/client" @@ -43,7 +43,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(rscheme)) utilruntime.Must(appsv1alpha1.AddToScheme(rscheme)) - utilruntime.Must(dataprotectionv1alpha1.AddToScheme(rscheme)) + utilruntime.Must(dpv1alpha1.AddToScheme(rscheme)) utilruntime.Must(snapshotv1.AddToScheme(rscheme)) utilruntime.Must(extensionsv1alpha1.AddToScheme(rscheme)) utilruntime.Must(batchv1.AddToScheme(rscheme)) diff --git a/controllers/apps/transformer_backup_policy_tpl.go b/controllers/apps/transformer_backup_policy_tpl.go index bbac92002e3..c52382461ff 100644 --- a/controllers/apps/transformer_backup_policy_tpl.go +++ b/controllers/apps/transformer_backup_policy_tpl.go @@ -24,213 +24,360 @@ import ( "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloads 
"github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/graph" ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper "github.com/apecloud/kubeblocks/internal/viperx" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) -// BackupPolicyTPLTransformer transforms the backup policy template to the backup policy. -type BackupPolicyTPLTransformer struct { +// BackupPolicyTplTransformer transforms the backup policy template to the backup policy. +type BackupPolicyTplTransformer struct { + *ClusterTransformContext + tplCount int tplIdentifier string isDefaultTemplate string + + backupPolicyTpl *appsv1alpha1.BackupPolicyTemplate + backupPolicy *appsv1alpha1.BackupPolicy + compWorkloadType appsv1alpha1.WorkloadType } -var _ graph.Transformer = &BackupPolicyTPLTransformer{} +var _ graph.Transformer = &BackupPolicyTplTransformer{} + +// Transform transforms the backup policy template to the backup policy and +// backup schedule. 
+func (r *BackupPolicyTplTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + rootVertex, err := ictrltypes.FindRootVertex(dag) + if err != nil { + return err + } -func (r *BackupPolicyTPLTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*ClusterTransformContext) - clusterDefName := transCtx.ClusterDef.Name - backupPolicyTPLs := &appsv1alpha1.BackupPolicyTemplateList{} - if err := transCtx.Client.List(transCtx.Context, backupPolicyTPLs, client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { + r.ClusterTransformContext = ctx.(*ClusterTransformContext) + clusterDefName := r.ClusterDef.Name + backupPolicyTpls := &appsv1alpha1.BackupPolicyTemplateList{} + if err = r.Client.List(r.Context, backupPolicyTpls, + client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { return err } - r.tplCount = len(backupPolicyTPLs.Items) + r.tplCount = len(backupPolicyTpls.Items) if r.tplCount == 0 { return nil } - rootVertex, err := ictrltypes.FindRootVertex(dag) - if err != nil { - return err - } - origCluster := transCtx.OrigCluster + backupPolicyNames := map[string]struct{}{} - for _, tpl := range backupPolicyTPLs.Items { - r.isDefaultTemplate = tpl.Annotations[constant.DefaultBackupPolicyTemplateAnnotationKey] + backupScheduleNames := map[string]struct{}{} + for _, tpl := range backupPolicyTpls.Items { + r.isDefaultTemplate = tpl.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] r.tplIdentifier = tpl.Spec.Identifier - for _, v := range tpl.Spec.BackupPolicies { - compDef := transCtx.ClusterDef.GetComponentDefByName(v.ComponentDefRef) + r.backupPolicyTpl = &tpl + + for i, bp := range tpl.Spec.BackupPolicies { + compDef := r.ClusterDef.GetComponentDefByName(bp.ComponentDefRef) if compDef == nil { - return intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", v.ComponentDefRef, clusterDefName) + return 
intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", + bp.ComponentDefRef, clusterDefName) } - // build the backup policy from the template. - backupPolicy, action := r.transformBackupPolicy(transCtx, v, origCluster, compDef.WorkloadType, &tpl) - - // merge cluster backup configuration into the backup policy. - r.mergeClusterBackup(transCtx, origCluster, backupPolicy) - if backupPolicy == nil { - continue + r.backupPolicy = &tpl.Spec.BackupPolicies[i] + r.compWorkloadType = compDef.WorkloadType + + transformBackupPolicy := func() (*dpv1alpha1.BackupPolicy, *ictrltypes.LifecycleVertex) { + // build the data protection backup policy from the template. + dpBackupPolicy, action := r.transformBackupPolicy() + if dpBackupPolicy == nil { + return nil, nil + } + + // if exist multiple backup policy templates and duplicate spec.identifier, + // the generated backupPolicy may have duplicate names, so it is + // necessary to check if it already exists. + if _, ok := backupPolicyNames[dpBackupPolicy.Name]; ok { + return dpBackupPolicy, nil + } + + vertex := &ictrltypes.LifecycleVertex{Obj: dpBackupPolicy, Action: action} + dag.AddVertex(vertex) + dag.Connect(rootVertex, vertex) + backupPolicyNames[dpBackupPolicy.Name] = struct{}{} + return dpBackupPolicy, vertex } - // if exist multiple backup policy templates and duplicate spec.identifier, - // the backupPolicy that may be generated may have duplicate names, and it is necessary to check if it already exists. - if _, ok := backupPolicyNames[backupPolicy.Name]; ok { - continue + transformBackupSchedule := func( + backupPolicy *dpv1alpha1.BackupPolicy, + bpVertex *ictrltypes.LifecycleVertex) { + // if backup policy is nil, it means that the backup policy template + // is invalid, backup schedule depends on backup policy, so we do + // not need to transform backup schedule. 
+ if backupPolicy == nil { + return + } + + // only create backup schedule for the default backup policy template + // if there are multiple backup policy templates. + if r.isDefaultTemplate != trueVal && r.tplCount > 1 { + return + } + + // build the data protection backup schedule from the template. + dpBackupSchedule, action := r.transformBackupSchedule(backupPolicy) + + // merge cluster backup configuration into the backup schedule. + // If the backup schedule is nil, create a new backup schedule + // based on the cluster backup configuration. + if dpBackupSchedule == nil { + action = ictrltypes.ActionCreatePtr() + } else if action == nil { + action = ictrltypes.ActionUpdatePtr() + } + + // for a cluster, the default backup schedule is created by backup + // policy template, user can also configure cluster backup in the + // cluster custom object, such as enable cluster backup, set backup + // schedule, etc. + // We always prioritize the cluster backup configuration in the + // cluster object, so we need to merge the cluster backup configuration + // into the default backup schedule created by backup policy template + // if it exists. + dpBackupSchedule = r.mergeClusterBackup(backupPolicy, dpBackupSchedule) + if dpBackupSchedule == nil { + return + } + + // if exist multiple backup policy templates and duplicate spec.identifier, + // the backupPolicy that may be generated may have duplicate names, + // and it is necessary to check if it already exists. 
+ if _, ok := backupScheduleNames[dpBackupSchedule.Name]; ok { + return + } + + parent := rootVertex + if bpVertex != nil { + parent = bpVertex + } + vertex := &ictrltypes.LifecycleVertex{Obj: dpBackupSchedule, Action: action} + dag.AddVertex(vertex) + dag.Connect(parent, vertex) + backupScheduleNames[dpBackupSchedule.Name] = struct{}{} } - vertex := &ictrltypes.LifecycleVertex{Obj: backupPolicy, Action: action} - dag.AddVertex(vertex) - dag.Connect(rootVertex, vertex) - backupPolicyNames[backupPolicy.Name] = struct{}{} + + // transform backup policy template to data protection backupPolicy + // and backupSchedule + policy, policyVertex := transformBackupPolicy() + transformBackupSchedule(policy, policyVertex) } } return nil } // transformBackupPolicy transforms backup policy template to backup policy. -func (r *BackupPolicyTPLTransformer) transformBackupPolicy(transCtx *ClusterTransformContext, - policyTPL appsv1alpha1.BackupPolicy, - cluster *appsv1alpha1.Cluster, - workloadType appsv1alpha1.WorkloadType, - tpl *appsv1alpha1.BackupPolicyTemplate) (*dataprotectionv1alpha1.BackupPolicy, *ictrltypes.LifecycleAction) { - backupPolicyName := DeriveBackupPolicyName(cluster.Name, policyTPL.ComponentDefRef, r.tplIdentifier) - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - if err := transCtx.Client.Get(transCtx.Context, client.ObjectKey{Namespace: cluster.Namespace, Name: backupPolicyName}, backupPolicy); err != nil && !apierrors.IsNotFound(err) { +func (r *BackupPolicyTplTransformer) transformBackupPolicy() (*dpv1alpha1.BackupPolicy, *ictrltypes.LifecycleAction) { + cluster := r.OrigCluster + backupPolicyName := generateBackupPolicyName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) + backupPolicy := &dpv1alpha1.BackupPolicy{} + if err := r.Client.Get(r.Context, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: backupPolicyName, + }, backupPolicy); client.IgnoreNotFound(err) != nil { return nil, nil } + if len(backupPolicy.Name) == 0 { - 
// build a new backup policy from the backup policy template. - return r.buildBackupPolicy(policyTPL, cluster, workloadType, tpl, backupPolicyName), ictrltypes.ActionCreatePtr() + // build a new backup policy by the backup policy template. + return r.buildBackupPolicy(backupPolicyName), ictrltypes.ActionCreatePtr() } + // sync the existing backup policy with the cluster changes - r.syncBackupPolicy(backupPolicy, cluster, policyTPL, workloadType, tpl) + r.syncBackupPolicy(backupPolicy) return backupPolicy, ictrltypes.ActionUpdatePtr() } +func (r *BackupPolicyTplTransformer) transformBackupSchedule( + backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupSchedule, *ictrltypes.LifecycleAction) { + cluster := r.OrigCluster + scheduleName := generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) + backupSchedule := &dpv1alpha1.BackupSchedule{} + if err := r.Client.Get(r.Context, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: scheduleName, + }, backupSchedule); client.IgnoreNotFound(err) != nil { + return nil, nil + } + + if len(backupSchedule.Name) == 0 { + // build a new backup schedule from the backup policy template. 
+ return r.buildBackupSchedule(scheduleName, backupPolicy), ictrltypes.ActionCreatePtr() + } + return backupSchedule, nil +} + +func (r *BackupPolicyTplTransformer) buildBackupSchedule( + name string, + backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupSchedule { + cluster := r.OrigCluster + backupSchedule := &dpv1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), + }, + Spec: dpv1alpha1.BackupScheduleSpec{ + BackupPolicyName: backupPolicy.Name, + }, + } + + var schedules []dpv1alpha1.SchedulePolicy + for _, s := range r.backupPolicy.Schedules { + schedules = append(schedules, dpv1alpha1.SchedulePolicy{ + BackupMethod: s.BackupMethod, + CronExpression: s.CronExpression, + Enabled: s.Enabled, + RetentionPeriod: r.backupPolicy.RetentionPeriod, + }) + } + backupSchedule.Spec.Schedules = schedules + return backupSchedule +} + // syncBackupPolicy syncs labels and annotations of the backup policy with the cluster changes. -func (r *BackupPolicyTPLTransformer) syncBackupPolicy(backupPolicy *dataprotectionv1alpha1.BackupPolicy, - cluster *appsv1alpha1.Cluster, - policyTPL appsv1alpha1.BackupPolicy, - workloadType appsv1alpha1.WorkloadType, - tpl *appsv1alpha1.BackupPolicyTemplate) { +func (r *BackupPolicyTplTransformer) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPolicy) { // update labels and annotations of the backup policy. 
if backupPolicy.Annotations == nil { backupPolicy.Annotations = map[string]string{} } - backupPolicy.Annotations[constant.DefaultBackupPolicyAnnotationKey] = r.defaultPolicyAnnotationValue() - backupPolicy.Annotations[constant.BackupPolicyTemplateAnnotationKey] = tpl.Name - if tpl.Annotations[constant.ReconfigureRefAnnotationKey] != "" { - backupPolicy.Annotations[constant.ReconfigureRefAnnotationKey] = tpl.Annotations[constant.ReconfigureRefAnnotationKey] - } if backupPolicy.Labels == nil { backupPolicy.Labels = map[string]string{} } - backupPolicy.Labels[constant.AppInstanceLabelKey] = cluster.Name - backupPolicy.Labels[constant.KBAppComponentDefRefLabelKey] = policyTPL.ComponentDefRef - backupPolicy.Labels[constant.AppManagedByLabelKey] = constant.AppName + mergeMap(backupPolicy.Annotations, r.buildAnnotations()) + mergeMap(backupPolicy.Labels, r.buildLabels()) - // only update the role labelSelector of the backup target instance when component workload is Replication/Consensus. - // because the replicas of component will change, such as 2->1. then if the target role is 'follower' and replicas is 1, - // the target instance can not be found. so we sync the label selector automatically. - if !slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) { + // only update the role labelSelector of the backup target instance when + // component workload is Replication/Consensus. Because the replicas of + // component will change, such as 2->1. then if the target role is 'follower' + // and replicas is 1, the target instance can not be found. so we sync the + // label selector automatically. + if !workloadHasRoleLabel(r.compWorkloadType) { return } - component := r.getFirstComponent(cluster, policyTPL.ComponentDefRef) - if component == nil { + + comp := r.getClusterComponentSpec() + if comp == nil { return } + // convert role labelSelector based on the replicas of the component automatically. 
- syncTheRoleLabel := func(target dataprotectionv1alpha1.TargetCluster, - basePolicy appsv1alpha1.BasePolicy) dataprotectionv1alpha1.TargetCluster { - role := basePolicy.Target.Role - if len(role) == 0 { - return target - } - if target.LabelsSelector == nil || target.LabelsSelector.MatchLabels == nil { - target.LabelsSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} - } - if component.Replicas == 1 { - // if replicas is 1, remove the role label selector. - delete(target.LabelsSelector.MatchLabels, constant.RoleLabelKey) - } else { - target.LabelsSelector.MatchLabels[constant.RoleLabelKey] = role - } - return target + // TODO(ldm): need more review. + role := r.backupPolicy.Target.Role + if len(role) == 0 { + return } - if backupPolicy.Spec.Snapshot != nil && policyTPL.Snapshot != nil { - backupPolicy.Spec.Snapshot.Target = syncTheRoleLabel(backupPolicy.Spec.Snapshot.Target, - policyTPL.Snapshot.BasePolicy) + + podSelector := backupPolicy.Spec.Target.PodSelector + if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil { + podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} } - if backupPolicy.Spec.Datafile != nil && policyTPL.Datafile != nil { - backupPolicy.Spec.Datafile.Target = syncTheRoleLabel(backupPolicy.Spec.Datafile.Target, - policyTPL.Datafile.BasePolicy) + if r.getCompReplicas() == 1 { + delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey) + } else { + podSelector.LabelSelector.MatchLabels[constant.RoleLabelKey] = role } - if backupPolicy.Spec.Logfile != nil && policyTPL.Logfile != nil { - backupPolicy.Spec.Logfile.Target = syncTheRoleLabel(backupPolicy.Spec.Logfile.Target, - policyTPL.Logfile.BasePolicy) +} + +func (r *BackupPolicyTplTransformer) getCompReplicas() int32 { + rsm := &workloads.ReplicatedStateMachine{} + compSpec := r.getClusterComponentSpec() + rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) + if err := r.Client.Get(r.Context, 
client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { + return compSpec.Replicas } + return *rsm.Spec.Replicas } -// buildBackupPolicy builds a new backup policy from the backup policy template. -func (r *BackupPolicyTPLTransformer) buildBackupPolicy(policyTPL appsv1alpha1.BackupPolicy, - cluster *appsv1alpha1.Cluster, - workloadType appsv1alpha1.WorkloadType, - tpl *appsv1alpha1.BackupPolicyTemplate, - backupPolicyName string) *dataprotectionv1alpha1.BackupPolicy { - component := r.getFirstComponent(cluster, policyTPL.ComponentDefRef) - if component == nil { +// buildBackupPolicy builds a new backup policy by the backup policy template. +func (r *BackupPolicyTplTransformer) buildBackupPolicy(backupPolicyName string) *dpv1alpha1.BackupPolicy { + comp := r.getClusterComponentSpec() + if comp == nil { return nil } - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{ + cluster := r.OrigCluster + backupPolicy := &dpv1alpha1.BackupPolicy{ ObjectMeta: metav1.ObjectMeta{ - Name: backupPolicyName, - Namespace: cluster.Namespace, - Labels: map[string]string{ - constant.AppInstanceLabelKey: cluster.Name, - constant.KBAppComponentDefRefLabelKey: policyTPL.ComponentDefRef, - constant.AppManagedByLabelKey: constant.AppName, - }, - Annotations: map[string]string{ - constant.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), - constant.BackupPolicyTemplateAnnotationKey: tpl.Name, - constant.BackupDataPathPrefixAnnotationKey: fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, component.Name), - }, + Name: backupPolicyName, + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), }, } - if tpl.Annotations[constant.ReconfigureRefAnnotationKey] != "" { - backupPolicy.Annotations[constant.ReconfigureRefAnnotationKey] = tpl.Annotations[constant.ReconfigureRefAnnotationKey] - } + bpSpec := backupPolicy.Spec - if policyTPL.Retention != nil { - bpSpec.Retention = 
&dataprotectionv1alpha1.RetentionSpec{ - TTL: policyTPL.Retention.TTL, - } - } - bpSpec.Schedule.StartingDeadlineMinutes = policyTPL.Schedule.StartingDeadlineMinutes - bpSpec.Schedule.Snapshot = r.convertSchedulePolicy(policyTPL.Schedule.Snapshot) - bpSpec.Schedule.Datafile = r.convertSchedulePolicy(policyTPL.Schedule.Datafile) - bpSpec.Schedule.Logfile = r.convertSchedulePolicy(policyTPL.Schedule.Logfile) - bpSpec.Datafile = r.convertCommonPolicy(policyTPL.Datafile, cluster.Name, *component, workloadType) - bpSpec.Logfile = r.convertCommonPolicy(policyTPL.Logfile, cluster.Name, *component, workloadType) - bpSpec.Snapshot = r.convertSnapshotPolicy(policyTPL.Snapshot, cluster.Name, *component, workloadType) + bpSpec.BackupMethods = r.backupPolicy.BackupMethods + bpSpec.PathPrefix = buildBackupPathPrefix(cluster, comp.Name) + bpSpec.Target = r.buildBackupTarget(comp) backupPolicy.Spec = bpSpec return backupPolicy } -// mergeClusterBackup merges the cluster backup configuration into the backup policy. 
-func (r *BackupPolicyTPLTransformer) mergeClusterBackup(transCtx *ClusterTransformContext, cluster *appsv1alpha1.Cluster, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) { +func (r *BackupPolicyTplTransformer) buildBackupTarget( + comp *appsv1alpha1.ClusterComponentSpec) *dpv1alpha1.BackupTarget { + targetTpl := r.backupPolicy.Target + clusterName := r.OrigCluster.Name + + getSAName := func() string { + if comp.ServiceAccountName != "" { + return comp.ServiceAccountName + } + return "kb-" + r.Cluster.Name + } + // build the target connection credential + cc := dpv1alpha1.ConnectionCredential{} + if len(targetTpl.Account) > 0 { + cc.SecretName = fmt.Sprintf("%s-%s-%s", clusterName, comp.Name, targetTpl.Account) + cc.PasswordKey = constant.AccountPasswdForSecret + cc.UsernameKey = constant.AccountNameForSecret + } else { + cc.SecretName = fmt.Sprintf("%s-conn-credential", clusterName) + ccKey := targetTpl.ConnectionCredentialKey + if ccKey.PasswordKey != nil { + cc.PasswordKey = *ccKey.PasswordKey + } + if ccKey.UsernameKey != nil { + cc.UsernameKey = *ccKey.UsernameKey + } + if ccKey.PortKey != nil { + cc.PortKey = *ccKey.PortKey + } + if ccKey.HostKey != nil { + cc.HostKey = *ccKey.HostKey + } + } + + target := &dpv1alpha1.BackupTarget{ + PodSelector: &dpv1alpha1.PodSelector{ + Strategy: dpv1alpha1.PodSelectionStrategyAny, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: r.buildTargetPodLabels(comp), + }, + }, + ConnectionCredential: &cc, + ServiceAccountName: getSAName(), + } + return target +} + +func (r *BackupPolicyTplTransformer) mergeClusterBackup( + backupPolicy *dpv1alpha1.BackupPolicy, + backupSchedule *dpv1alpha1.BackupSchedule) *dpv1alpha1.BackupSchedule { + cluster := r.OrigCluster backupEnabled := func() bool { return cluster.Spec.Backup != nil && boolValue(cluster.Spec.Backup.Enabled) } @@ -238,243 +385,142 @@ func (r *BackupPolicyTPLTransfo if backupPolicy == 
nil { // backup policy is nil, can not enable cluster backup, so record event and return. if backupEnabled() { - transCtx.EventRecorder.Event(transCtx.Cluster, corev1.EventTypeWarning, + r.EventRecorder.Event(r.Cluster, corev1.EventTypeWarning, "BackupPolicyNotFound", "backup policy is nil, can not enable cluster backup") } - return + return nil } backup := cluster.Spec.Backup - spec := &backupPolicy.Spec - setSchedulePolicy := func(schedulePolicy *dataprotectionv1alpha1.SchedulePolicy, enable bool) { - if schedulePolicy == nil { - if enable { - // failed to find the schedule policy for backup method, so record event and return. - transCtx.EventRecorder.Eventf(transCtx.Cluster, corev1.EventTypeWarning, "BackupSchedulePolicyNotFound", - "failed to find the schedule policy for backup method %s", backup.Method) - } - return - } - schedulePolicy.Enable = enable - if enable && backup.CronExpression != "" { - schedulePolicy.CronExpression = backup.CronExpression - } - } - - // disable automated backup, set all backup schedule to false - if !backupEnabled() { - setSchedulePolicy(spec.Schedule.Snapshot, false) - setSchedulePolicy(spec.Schedule.Datafile, false) - setSchedulePolicy(spec.Schedule.Logfile, false) - return - } - - if backup.RetentionPeriod != nil { - spec.Retention = &dataprotectionv1alpha1.RetentionSpec{ - TTL: backup.RetentionPeriod, + // there is no backup schedule created by backup policy template, so we need to + // create a new backup schedule for cluster backup. 
+ if backupSchedule == nil { + backupSchedule = &dpv1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier), + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), + }, + Spec: dpv1alpha1.BackupScheduleSpec{ + BackupPolicyName: backupPolicy.Name, + StartingDeadlineMinutes: backup.StartingDeadlineMinutes, + Schedules: []dpv1alpha1.SchedulePolicy{}, + }, } } - if backup.StartingDeadlineMinutes != nil { - spec.Schedule.StartingDeadlineMinutes = backup.StartingDeadlineMinutes - } - - var commonBackupPolicy *dataprotectionv1alpha1.CommonBackupPolicy - switch backup.Method { - case dataprotectionv1alpha1.BackupMethodSnapshot: - // enable snapshot and disable datafile - setSchedulePolicy(spec.Schedule.Snapshot, true) - setSchedulePolicy(spec.Schedule.Datafile, false) - case dataprotectionv1alpha1.BackupMethodBackupTool: - // disable snapshot and enable datafile - setSchedulePolicy(spec.Schedule.Snapshot, false) - setSchedulePolicy(spec.Schedule.Datafile, true) - commonBackupPolicy = spec.Datafile + // build backup schedule policy by cluster backup spec + sp := &dpv1alpha1.SchedulePolicy{ + Enabled: backup.Enabled, + RetentionPeriod: backup.RetentionPeriod, + BackupMethod: backup.Method, + CronExpression: backup.CronExpression, } - setRepoName := func(bp *dataprotectionv1alpha1.CommonBackupPolicy) { - if backup.RepoName == "" || bp == nil { - return + // merge cluster backup schedule policy into backup schedule, if the backup + // schedule with specified method already exists, we need to update it + // using the cluster backup schedule policy. Otherwise, we need to append + // it to the backup schedule. 
+ for i, s := range backupSchedule.Spec.Schedules { + if s.BackupMethod == backup.Method { + mergeSchedulePolicy(sp, &backupSchedule.Spec.Schedules[i]) + return backupSchedule } - bp.BackupRepoName = &backup.RepoName - } - setRepoName(commonBackupPolicy) - setRepoName(spec.Logfile) - - pitrEnabled := boolValue(backup.PITREnabled) - if backupPolicy.Spec.Schedule.Logfile != nil { - backupPolicy.Spec.Schedule.Logfile.Enable = pitrEnabled - } else if pitrEnabled { - // TODO: if backupPolicy.Spec.Schedule.Logfile is nil, and backup.PITREnabled is true, - // should we create a new SchedulePolicy for logfile? - // Now, hscale also maintains a backupPolicy, we can not distinguish the backupPolicy for backup - // or hscale, so we can not create a new SchedulePolicy for logfile. - // Need a method to distinguish the backupPolicy for backup or hscale in the future. - transCtx.EventRecorder.Eventf(transCtx.Cluster, corev1.EventTypeWarning, - "BackupSchedulePolicyNotFound", "failed to find the schedule policy for PITR") } + backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, *sp) + return backupSchedule } -// getFirstComponent returns the first component name of the componentDefRef. -func (r *BackupPolicyTPLTransformer) getFirstComponent(cluster *appsv1alpha1.Cluster, - componentDefRef string) *appsv1alpha1.ClusterComponentSpec { - for _, v := range cluster.Spec.ComponentSpecs { - if v.ComponentDefRef == componentDefRef { +// getClusterComponentSpec returns the first component name of the componentDefRef. +func (r *BackupPolicyTplTransformer) getClusterComponentSpec() *appsv1alpha1.ClusterComponentSpec { + for _, v := range r.OrigCluster.Spec.ComponentSpecs { + if v.ComponentDefRef == r.backupPolicy.ComponentDefRef { return &v } } return nil } -// convertSchedulePolicy converts the schedulePolicy from backupPolicyTemplate. 
-func (r *BackupPolicyTPLTransformer) convertSchedulePolicy(sp *appsv1alpha1.SchedulePolicy) *dataprotectionv1alpha1.SchedulePolicy { - if sp == nil { - return nil - } - return &dataprotectionv1alpha1.SchedulePolicy{ - Enable: sp.Enable, - CronExpression: sp.CronExpression, +func (r *BackupPolicyTplTransformer) defaultPolicyAnnotationValue() string { + if r.tplCount > 1 && r.isDefaultTemplate != trueVal { + return "false" } + return trueVal } -// convertBasePolicy converts the basePolicy from backupPolicyTemplate. -func (r *BackupPolicyTPLTransformer) convertBasePolicy(bp appsv1alpha1.BasePolicy, - clusterName string, - component appsv1alpha1.ClusterComponentSpec, - workloadType appsv1alpha1.WorkloadType) dataprotectionv1alpha1.BasePolicy { - basePolicy := dataprotectionv1alpha1.BasePolicy{ - Target: dataprotectionv1alpha1.TargetCluster{ - LabelsSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: component.Name, - constant.AppManagedByLabelKey: constant.AppName, - }, - }, - }, - BackupsHistoryLimit: bp.BackupsHistoryLimit, - OnFailAttempted: bp.OnFailAttempted, - } - if len(bp.BackupStatusUpdates) != 0 { - backupStatusUpdates := make([]dataprotectionv1alpha1.BackupStatusUpdate, len(bp.BackupStatusUpdates)) - for i, v := range bp.BackupStatusUpdates { - backupStatusUpdates[i] = dataprotectionv1alpha1.BackupStatusUpdate{ - Path: v.Path, - ContainerName: v.ContainerName, - Script: v.Script, - UseTargetPodServiceAccount: v.UseTargetPodServiceAccount, - UpdateStage: dataprotectionv1alpha1.BackupStatusUpdateStage(v.UpdateStage), - } - } - basePolicy.BackupStatusUpdates = backupStatusUpdates - } - switch workloadType { - case appsv1alpha1.Replication, appsv1alpha1.Consensus: - if len(bp.Target.Role) > 0 && component.Replicas > 1 { - // the role only works when the component has multiple replicas. 
- basePolicy.Target.LabelsSelector.MatchLabels[constant.RoleLabelKey] = bp.Target.Role - } +func (r *BackupPolicyTplTransformer) buildAnnotations() map[string]string { + annotations := map[string]string{ + dptypes.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), + constant.BackupPolicyTemplateAnnotationKey: r.backupPolicyTpl.Name, } - // build the target secret. - if len(bp.Target.Account) > 0 { - basePolicy.Target.Secret = &dataprotectionv1alpha1.BackupPolicySecret{ - Name: fmt.Sprintf("%s-%s-%s", clusterName, component.Name, bp.Target.Account), - PasswordKey: constant.AccountPasswdForSecret, - UsernameKey: constant.AccountNameForSecret, - } - } else { - basePolicy.Target.Secret = &dataprotectionv1alpha1.BackupPolicySecret{ - Name: fmt.Sprintf("%s-conn-credential", clusterName), - } - connectionCredentialKey := bp.Target.ConnectionCredentialKey - if connectionCredentialKey.PasswordKey != nil { - basePolicy.Target.Secret.PasswordKey = *connectionCredentialKey.PasswordKey - } - if connectionCredentialKey.UsernameKey != nil { - basePolicy.Target.Secret.UsernameKey = *connectionCredentialKey.UsernameKey - } + if r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] != "" { + annotations[dptypes.ReconfigureRefAnnotationKey] = r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] } - return basePolicy + return annotations } -// convertBaseBackupSchedulePolicy converts the snapshotPolicy from backupPolicyTemplate. 
-func (r *BackupPolicyTPLTransformer) convertSnapshotPolicy(sp *appsv1alpha1.SnapshotPolicy, - clusterName string, - component appsv1alpha1.ClusterComponentSpec, - workloadType appsv1alpha1.WorkloadType) *dataprotectionv1alpha1.SnapshotPolicy { - if sp == nil { - return nil - } - snapshotPolicy := &dataprotectionv1alpha1.SnapshotPolicy{ - BasePolicy: r.convertBasePolicy(sp.BasePolicy, clusterName, component, workloadType), +func (r *BackupPolicyTplTransformer) buildLabels() map[string]string { + return map[string]string{ + constant.AppInstanceLabelKey: r.OrigCluster.Name, + constant.KBAppComponentDefRefLabelKey: r.backupPolicy.ComponentDefRef, + constant.AppManagedByLabelKey: constant.AppName, } - if sp.Hooks != nil { - snapshotPolicy.Hooks = &dataprotectionv1alpha1.BackupPolicyHook{ - PreCommands: sp.Hooks.PreCommands, - PostCommands: sp.Hooks.PostCommands, - ContainerName: sp.Hooks.ContainerName, - Image: sp.Hooks.Image, - } - } - return snapshotPolicy } -// convertBaseBackupSchedulePolicy converts the commonPolicy from backupPolicyTemplate. -func (r *BackupPolicyTPLTransformer) convertCommonPolicy(bp *appsv1alpha1.CommonBackupPolicy, - clusterName string, - component appsv1alpha1.ClusterComponentSpec, - workloadType appsv1alpha1.WorkloadType) *dataprotectionv1alpha1.CommonBackupPolicy { - if bp == nil { - return nil - } - defaultCreatePolicy := dataprotectionv1alpha1.CreatePVCPolicyIfNotPresent - globalCreatePolicy := viper.GetString(constant.CfgKeyBackupPVCCreatePolicy) - if dataprotectionv1alpha1.CreatePVCPolicy(globalCreatePolicy) == dataprotectionv1alpha1.CreatePVCPolicyNever { - defaultCreatePolicy = dataprotectionv1alpha1.CreatePVCPolicyNever - } - defaultInitCapacity := constant.DefaultBackupPvcInitCapacity - globalInitCapacity := viper.GetString(constant.CfgKeyBackupPVCInitCapacity) - if len(globalInitCapacity) != 0 { - defaultInitCapacity = globalInitCapacity - } - // set the persistent volume configmap infos if these variables exist. 
- globalPVConfigMapName := viper.GetString(constant.CfgKeyBackupPVConfigmapName) - globalPVConfigMapNamespace := viper.GetString(constant.CfgKeyBackupPVConfigmapNamespace) - var persistentVolumeConfigMap *dataprotectionv1alpha1.PersistentVolumeConfigMap - if globalPVConfigMapName != "" && globalPVConfigMapNamespace != "" { - persistentVolumeConfigMap = &dataprotectionv1alpha1.PersistentVolumeConfigMap{ - Name: globalPVConfigMapName, - Namespace: globalPVConfigMapNamespace, - } - } - globalStorageClass := viper.GetString(constant.CfgKeyBackupPVCStorageClass) - var storageClassName *string - if globalStorageClass != "" { - storageClassName = &globalStorageClass - } - return &dataprotectionv1alpha1.CommonBackupPolicy{ - BackupToolName: bp.BackupToolName, - PersistentVolumeClaim: dataprotectionv1alpha1.PersistentVolumeClaim{ - InitCapacity: resource.MustParse(defaultInitCapacity), - CreatePolicy: defaultCreatePolicy, - PersistentVolumeConfigMap: persistentVolumeConfigMap, - StorageClassName: storageClassName, - }, - BasePolicy: r.convertBasePolicy(bp.BasePolicy, clusterName, component, workloadType), - } +// buildTargetPodLabels builds the target labels for the backup policy that will be +// used to select the target pod. +func (r *BackupPolicyTplTransformer) buildTargetPodLabels(comp *appsv1alpha1.ClusterComponentSpec) map[string]string { + labels := map[string]string{ + constant.AppInstanceLabelKey: r.OrigCluster.Name, + constant.KBAppComponentLabelKey: comp.Name, + constant.AppManagedByLabelKey: constant.AppName, + } + // append label to filter specific role of the component. + targetTpl := &r.backupPolicy.Target + if workloadHasRoleLabel(r.compWorkloadType) && + len(targetTpl.Role) > 0 && r.getCompReplicas() > 1 { + // the role only works when the component has multiple replicas. 
+ labels[constant.RoleLabelKey] = targetTpl.Role + } + return labels } -func (r *BackupPolicyTPLTransformer) defaultPolicyAnnotationValue() string { - if r.tplCount > 1 && r.isDefaultTemplate != trueVal { - return "false" +// generateBackupPolicyName generates the backup policy name which is created from backup policy template. +func generateBackupPolicyName(clusterName, componentDef, identifier string) string { + if len(identifier) == 0 { + return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) } - return trueVal + return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) } -// DeriveBackupPolicyName generates the backup policy name which is created from backup policy template. -func DeriveBackupPolicyName(clusterName, componentDef, identifier string) string { +// generateBackupScheduleName generates the backup schedule name which is created from backup policy template. +func generateBackupScheduleName(clusterName, componentDef, identifier string) string { if len(identifier) == 0 { - return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) + return fmt.Sprintf("%s-%s-backup-schedule", clusterName, componentDef) + } + return fmt.Sprintf("%s-%s-backup-schedule-%s", clusterName, componentDef, identifier) +} + +func buildBackupPathPrefix(cluster *appsv1alpha1.Cluster, compName string) string { + return fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, compName) +} + +func workloadHasRoleLabel(workloadType appsv1alpha1.WorkloadType) bool { + return slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) +} + +func mergeSchedulePolicy(src *dpv1alpha1.SchedulePolicy, dst *dpv1alpha1.SchedulePolicy) { + if src.Enabled != nil { + dst.Enabled = src.Enabled + } + if src.RetentionPeriod.String() != "" { + dst.RetentionPeriod = src.RetentionPeriod + } + if src.BackupMethod != "" { + dst.BackupMethod = src.BackupMethod + } + if src.CronExpression != "" { + 
dst.CronExpression = src.CronExpression } - return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) } diff --git a/controllers/apps/transformer_cluster_deletion.go b/controllers/apps/transformer_cluster_deletion.go index 5a812ad5847..2a0d62875a6 100644 --- a/controllers/apps/transformer_cluster_deletion.go +++ b/controllers/apps/transformer_cluster_deletion.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/graph" @@ -210,8 +210,10 @@ func kindsForDelete() ([]client.ObjectList, []client.ObjectList) { &corev1.SecretList{}, &corev1.ConfigMapList{}, &corev1.PersistentVolumeClaimList{}, - &dataprotectionv1alpha1.BackupPolicyList{}, + &dpv1alpha1.BackupPolicyList{}, + &dpv1alpha1.BackupScheduleList{}, &batchv1.JobList{}, + &dpv1alpha1.RestoreList{}, } return append(namespacedKinds, namespacedKindsPlus...), nonNamespacedKinds } @@ -219,7 +221,7 @@ func kindsForDelete() ([]client.ObjectList, []client.ObjectList) { func kindsForWipeOut() ([]client.ObjectList, []client.ObjectList) { namespacedKinds, nonNamespacedKinds := kindsForDelete() namespacedKindsPlus := []client.ObjectList{ - &dataprotectionv1alpha1.BackupList{}, + &dpv1alpha1.BackupList{}, } return append(namespacedKinds, namespacedKindsPlus...), nonNamespacedKinds } diff --git a/controllers/apps/transformer_rbac.go b/controllers/apps/transformer_rbac.go index 079c1c0290b..fad83c24da7 100644 --- a/controllers/apps/transformer_rbac.go +++ b/controllers/apps/transformer_rbac.go @@ -38,6 +38,7 @@ import ( 
"github.com/apecloud/kubeblocks/internal/controller/graph" ictrltypes "github.com/apecloud/kubeblocks/internal/controller/types" ictrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -273,7 +274,7 @@ func getDefaultBackupPolicyTemplate(transCtx *ClusterTransformContext, clusterDe return nil, nil } for _, item := range backupPolicyTPLs.Items { - if item.Annotations[constant.DefaultBackupPolicyTemplateAnnotationKey] == trueVal { + if item.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] == trueVal { return &item, nil } } diff --git a/controllers/apps/utils.go b/controllers/apps/utils.go index bbf32f4e063..d886bc3c7e2 100644 --- a/controllers/apps/utils.go +++ b/controllers/apps/utils.go @@ -87,3 +87,9 @@ func boolValue(b *bool) bool { } return *b } + +func mergeMap(dst, src map[string]string) { + for key, val := range src { + dst[key] = val + } +} diff --git a/controllers/dataprotection/actionset_controller.go b/controllers/dataprotection/actionset_controller.go new file mode 100644 index 00000000000..6d718ca9b1e --- /dev/null +++ b/controllers/dataprotection/actionset_controller.go @@ -0,0 +1,103 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" +) + +// ActionSetReconciler reconciles a ActionSet object +type ActionSetReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=actionsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=actionsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=actionsets/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the actionset closer to the desired state. 
+func (r *ActionSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqCtx := intctrlutil.RequestCtx{ + Ctx: ctx, + Req: req, + Log: log.FromContext(ctx).WithValues("actionSet", req.Name), + Recorder: r.Recorder, + } + + actionSet := &dpv1alpha1.ActionSet{} + if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, actionSet); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + + // handle finalizer + res, err := intctrlutil.HandleCRDeletion(reqCtx, r, actionSet, dptypes.DataProtectionFinalizerName, + func() (*ctrl.Result, error) { + return nil, r.deleteExternalResources(reqCtx, actionSet) + }) + if res != nil { + return *res, err + } + + if actionSet.Status.ObservedGeneration == actionSet.Generation && + actionSet.Status.Phase.IsAvailable() { + return ctrl.Result{}, nil + } + + patchStatus := func(phase dpv1alpha1.Phase, message string) error { + patch := client.MergeFrom(actionSet.DeepCopy()) + actionSet.Status.Phase = phase + actionSet.Status.Message = message + actionSet.Status.ObservedGeneration = actionSet.Generation + return r.Client.Status().Patch(reqCtx.Ctx, actionSet, patch) + } + + // TODO(ldm): validate actionSet + + if err := patchStatus(dpv1alpha1.AvailablePhase, ""); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + intctrlutil.RecordCreatedEvent(r.Recorder, actionSet) + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ActionSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&dpv1alpha1.ActionSet{}).Complete(r) +} + +func (r *ActionSetReconciler) deleteExternalResources( + _ intctrlutil.RequestCtx, + _ *dpv1alpha1.ActionSet) error { + return nil +} diff --git a/controllers/dataprotection/actionset_controller_test.go b/controllers/dataprotection/actionset_controller_test.go new file mode 100644 index 00000000000..e224f7da1d6 --- /dev/null +++ b/controllers/dataprotection/actionset_controller_test.go @@ -0,0 +1,61 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/client" + + intctrlutil "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" +) + +var _ = Describe("ActionSet Controller test", func() { + cleanEnv := func() { + // must wait till resources deleted and no longer existed before the testcases start, + // otherwise if later it needs to create some new resource objects with the same name, + // in race conditions, it will find the existence of old objects, resulting failure to + // create the new objects. 
+ By("clean resources") + + ml := client.HasLabels{testCtx.TestObjLabelKey} + + // non-namespaced + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.ActionSetSignature, true, ml) + } + + BeforeEach(func() { + cleanEnv() + }) + + AfterEach(func() { + cleanEnv() + }) + + Context("create a actionSet", func() { + It("should be available", func() { + as := testdp.NewFakeActionSet(&testCtx) + Expect(as).ShouldNot(BeNil()) + }) + }) +}) diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index a73eef996aa..7f5122771d1 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -22,28 +22,19 @@ package dataprotection import ( "context" "encoding/json" - "errors" "fmt" - "math" "reflect" - "sort" - "strconv" - "strings" "time" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" - "github.com/leaanthony/debme" + vsv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" @@ -51,36 +42,29 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - 
"sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" - ctrlbuilder "github.com/apecloud/kubeblocks/internal/controller/factory" "github.com/apecloud/kubeblocks/internal/controller/model" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + dpbackup "github.com/apecloud/kubeblocks/internal/dataprotection/backup" + dperrors "github.com/apecloud/kubeblocks/internal/dataprotection/errors" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" viper "github.com/apecloud/kubeblocks/internal/viperx" ) -const ( - backupPathBase = "/backupdata" - deleteBackupFilesJobNamePrefix = "delete-" -) - -var ( - // errBreakReconcile is not a real error, it is used to break the current reconciliation - errBreakReconcile = errors.New("break reconcile") -) - // BackupReconciler reconciles a Backup object type BackupReconciler struct { client.Client - Scheme *k8sruntime.Scheme - Recorder record.EventRecorder - clock clock.RealClock - snapshotCli *intctrlutil.VolumeSnapshotCompatClient + Scheme *k8sruntime.Scheme + Recorder record.EventRecorder + RestConfig *rest.Config + clock clock.RealClock } // +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backups,verbs=get;list;watch;create;update;patch;delete @@ -94,12 +78,8 @@ type BackupReconciler struct { // +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses/finalizers,verbs=update;patch // Reconcile is 
part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile +// move the current state of the backup closer to the desired state. func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // NOTES: // setup common request context reqCtx := intctrlutil.RequestCtx{ Ctx: ctx, @@ -107,37 +87,35 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr Log: log.FromContext(ctx).WithValues("backup", req.NamespacedName), Recorder: r.Recorder, } - // initialize snapshotCompatClient - r.snapshotCli = &intctrlutil.VolumeSnapshotCompatClient{ - Client: r.Client, - Ctx: ctx, - } - // Get backup obj - backup := &dataprotectionv1alpha1.Backup{} + + // get backup object, and return if not found + backup := &dpv1alpha1.Backup{} if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, backup); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - reqCtx.Log.V(1).Info("in Backup Reconciler:", "backup", backup.Name, "phase", backup.Status.Phase) - // handle deletion - res, err := r.handleBackupDeletion(reqCtx, backup) - if res != nil { - return *res, err - } + reqCtx.Log.V(1).Info("reconcile", "backup", req.NamespacedName, "phase", backup.Status.Phase) - switch backup.Status.Phase { - case "", dataprotectionv1alpha1.BackupNew: - return r.doNewPhaseAction(reqCtx, backup) - case dataprotectionv1alpha1.BackupInProgress: - return r.doInProgressPhaseAction(reqCtx, backup) - case dataprotectionv1alpha1.BackupRunning: - if err = r.doInRunningPhaseAction(reqCtx, backup); err != nil { - sendWarningEventForError(r.Recorder, backup, err) + // if backup is being deleted, set backup phase to Deleting. 
The backup + // reference workloads, data and volume snapshots will be deleted by controller + // later when the backup status.phase is deleting. + if !backup.GetDeletionTimestamp().IsZero() && backup.Status.Phase != dpv1alpha1.BackupPhaseDeleting { + patch := client.MergeFrom(backup.DeepCopy()) + backup.Status.Phase = dpv1alpha1.BackupPhaseDeleting + if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { return intctrlutil.RequeueWithError(err, reqCtx.Log, "") } - return intctrlutil.Reconciled() - case dataprotectionv1alpha1.BackupCompleted: - return r.doCompletedPhaseAction(reqCtx, backup) + } + + switch backup.Status.Phase { + case "", dpv1alpha1.BackupPhaseNew: + return r.handleNewPhase(reqCtx, backup) + case dpv1alpha1.BackupPhaseRunning: + return r.handleRunningPhase(reqCtx, backup) + case dpv1alpha1.BackupPhaseCompleted: + return r.handleCompletedPhase(reqCtx, backup) + case dpv1alpha1.BackupPhaseDeleting: + return r.handleDeletingPhase(reqCtx, backup) default: return intctrlutil.Reconciled() } @@ -145,74 +123,42 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // SetupWithManager sets up the controller with the Manager. func (r *BackupReconciler) SetupWithManager(mgr ctrl.Manager) error { - b := ctrl.NewControllerManagedBy(mgr). - For(&dataprotectionv1alpha1.Backup{}). + For(&dpv1alpha1.Backup{}). WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), }). - Owns(&batchv1.Job{}). - Owns(&appsv1.StatefulSet{}). 
- Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(r.filterBackupPods)) - - if viper.GetBool("VOLUMESNAPSHOT") { - if intctrlutil.InVolumeSnapshotV1Beta1() { - b.Owns(&snapshotv1beta1.VolumeSnapshot{}, builder.Predicates{}) - } else { - b.Owns(&snapshotv1.VolumeSnapshot{}, builder.Predicates{}) - } - } - - return b.Complete(r) -} + Owns(&batchv1.Job{}) -// checkPodsOfStatefulSetHasDeleted checks if the pods of statefulSet have been deleted -func (r *BackupReconciler) checkPodsOfStatefulSetHasDeleted(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) (bool, error) { - podList := &corev1.PodList{} - if err := r.Client.List(reqCtx.Ctx, podList, client.InNamespace(reqCtx.Req.Namespace), - client.MatchingLabels(buildBackupWorkloadsLabels(backup))); err != nil { - return false, err - } - for _, pod := range podList.Items { - for _, owner := range pod.OwnerReferences { - // checks if the pod is owned by sts - if owner.Kind == constant.StatefulSetKind && owner.Name == backup.Name { - return false, nil - } - } + if intctrlutil.InVolumeSnapshotV1Beta1() { + b.Owns(&vsv1beta1.VolumeSnapshot{}, builder.Predicates{}) + } else { + b.Owns(&vsv1.VolumeSnapshot{}, builder.Predicates{}) } - return true, nil + return b.Complete(r) } -// handleBackupDeleting handles the Deleting phase of backup. -func (r *BackupReconciler) handleBackupDeleting(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - hasDeleted, err := r.checkPodsOfStatefulSetHasDeleted(reqCtx, backup) - if err != nil { - return err - } - // wait for pods of sts clean up successfully - if !hasDeleted { - return nil - } - deleteFileJob, err := r.handleDeleteBackupFiles(reqCtx, backup) - if err != nil { - return err - } +// deleteBackupFiles deletes the backup files stored in backup repository. 
+func (r *BackupReconciler) deleteBackupFiles(reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) error { deleteBackup := func() error { // remove backup finalizers to delete it patch := client.MergeFrom(backup.DeepCopy()) - controllerutil.RemoveFinalizer(backup, dataProtectionFinalizerName) + controllerutil.RemoveFinalizer(backup, dptypes.DataProtectionFinalizerName) return r.Patch(reqCtx.Ctx, backup, patch) } - // if deleteFileJob is nil, do not to delete backup files - if deleteFileJob == nil { - return deleteBackup() + + deleter := &dpbackup.Deleter{ + RequestCtx: reqCtx, + Client: r.Client, + Scheme: r.Scheme, } - if containsJobCondition(deleteFileJob, batchv1.JobComplete) { + + status, err := deleter.DeleteBackupFiles(backup) + switch status { + case dpbackup.DeletionStatusSucceeded: return deleteBackup() - } - if containsJobCondition(deleteFileJob, batchv1.JobFailed) { - failureReason := fmt.Sprintf(`the job "%s" for backup files deletion failed, you can delete it to re-delete the files`, deleteFileJob.Name) + case dpbackup.DeletionStatusFailed: + failureReason := err.Error() if backup.Status.FailureReason == failureReason { return nil } @@ -220,382 +166,265 @@ func (r *BackupReconciler) handleBackupDeleting(reqCtx intctrlutil.RequestCtx, b backup.Status.FailureReason = failureReason r.Recorder.Event(backup, corev1.EventTypeWarning, "DeleteBackupFilesFailed", failureReason) return r.Status().Patch(reqCtx.Ctx, backup, backupPatch) + case dpbackup.DeletionStatusDeleting, + dpbackup.DeletionStatusUnknown: + // wait for the deletion job completed + return err } - // wait for the deletion job completed - return nil + return err } -func (r *BackupReconciler) handleBackupDeletion(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) (*ctrl.Result, error) { - if backup.Status.Phase == dataprotectionv1alpha1.BackupDeleting { - // handle deleting - if err := r.handleBackupDeleting(reqCtx, backup); err != nil { - return 
intctrlutil.ResultToP(intctrlutil.RequeueWithError(err, reqCtx.Log, "")) - } - return intctrlutil.ResultToP(intctrlutil.Reconciled()) - } - if !backup.GetDeletionTimestamp().IsZero() { - if err := r.deleteExternalResources(reqCtx, backup); err != nil { - return intctrlutil.ResultToP(intctrlutil.RequeueWithError(err, reqCtx.Log, "")) - } - // backup phase to Deleting - patch := client.MergeFrom(backup.DeepCopy()) - backup.Status.Phase = dataprotectionv1alpha1.BackupDeleting - if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { - return intctrlutil.ResultToP(intctrlutil.RequeueWithError(err, reqCtx.Log, "")) - } - return intctrlutil.ResultToP(intctrlutil.Reconciled()) +// handleDeletingPhase handles the deletion of backup. It will delete the backup CR +// and the backup workload(job/statefulset). +func (r *BackupReconciler) handleDeletingPhase(reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) (ctrl.Result, error) { + // if backup phase is Deleting, delete the backup reference workloads, + // backup data stored in backup repository and volume snapshots. + // TODO(ldm): if backup is being used by restore, do not delete it. 
+ if err := r.deleteExternalResources(reqCtx, backup); err != nil { + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") } - return nil, nil -} -func (r *BackupReconciler) filterBackupPods(ctx context.Context, obj client.Object) []reconcile.Request { - labels := obj.GetLabels() - if v, ok := labels[constant.AppManagedByLabelKey]; !ok || v != constant.AppName { - return []reconcile.Request{} - } - backupName, ok := labels[constant.DataProtectionLabelBackupNameKey] - if !ok { - return []reconcile.Request{} - } - var isCreateByStatefulSet bool - for _, v := range obj.GetOwnerReferences() { - if v.Kind == constant.StatefulSetKind && v.Name == backupName { - isCreateByStatefulSet = true - break - } + if backup.Spec.DeletionPolicy == dpv1alpha1.BackupDeletionPolicyRetain { + return intctrlutil.Reconciled() } - if !isCreateByStatefulSet { - return []reconcile.Request{} + + if err := r.deleteVolumeSnapshots(reqCtx, backup); err != nil { + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") } - return []reconcile.Request{ - { - NamespacedName: types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: backupName, - }, - }, + + if err := r.deleteBackupFiles(reqCtx, backup); err != nil { + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") } + return intctrlutil.Reconciled() } -func (r *BackupReconciler) getBackupPolicyAndValidate( +func (r *BackupReconciler) handleNewPhase( reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) (*dataprotectionv1alpha1.BackupPolicy, error) { - // get referenced backup policy - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - backupPolicyNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Spec.BackupPolicyName, - } - if err := r.Get(reqCtx.Ctx, backupPolicyNameSpaceName, backupPolicy); err != nil { - return nil, err - } - - if len(backupPolicy.Name) == 0 { - return nil, intctrlutil.NewNotFound(`backup policy "%s" not found`, backupPolicyNameSpaceName) + backup 
*dpv1alpha1.Backup) (ctrl.Result, error) { + request, err := r.prepareBackupRequest(reqCtx, backup) + if err != nil { + return r.updateStatusIfFailed(reqCtx, backup.DeepCopy(), backup, err) } - // validate backup spec - if err := backup.Spec.Validate(backupPolicy); err != nil { - return nil, err + // set and patch backup object meta, including labels, annotations and finalizers + // if the backup object meta is changed, the backup object will be patched. + if patched, err := r.patchBackupObjectMeta(backup, request); err != nil { + return r.updateStatusIfFailed(reqCtx, backup, request.Backup, err) + } else if patched { + return intctrlutil.Reconciled() } - return backupPolicy, nil -} -func (r *BackupReconciler) validateLogfileBackupLegitimacy(backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - backupType := backup.Spec.BackupType - if backupType != dataprotectionv1alpha1.BackupTypeLogFile { - return nil - } - if backup.Name != getCreatedCRNameByBackupPolicy(backupPolicy, backupType) { - return intctrlutil.NewInvalidLogfileBackupName(backupPolicy.Name) - } - if backupPolicy.Spec.Schedule.Logfile == nil { - return intctrlutil.NewBackupNotSupported(string(backupType), backupPolicy.Name) - } - if !backupPolicy.Spec.Schedule.Logfile.Enable { - return intctrlutil.NewBackupScheduleDisabled(string(backupType), backupPolicy.Name) + // set and patch backup status + if err = r.patchBackupStatus(backup, request); err != nil { + return r.updateStatusIfFailed(reqCtx, backup, request.Backup, err) } - return nil + return intctrlutil.Reconciled() } -func (r *BackupReconciler) doNewPhaseAction( +// prepareBackupRequest prepares a request for a backup, with all references to +// other kubernetes objects, and validate them. 
+func (r *BackupReconciler) prepareBackupRequest( reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) (ctrl.Result, error) { - - patch := client.MergeFrom(backup.DeepCopy()) - // HACK/TODO: ought to move following check to validation webhook - if backup.Spec.BackupType == dataprotectionv1alpha1.BackupTypeSnapshot && !viper.GetBool("VOLUMESNAPSHOT") { - backup.Status.Phase = dataprotectionv1alpha1.BackupFailed - backup.Status.FailureReason = "VolumeSnapshot feature disabled." - if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - return intctrlutil.Reconciled() + backup *dpv1alpha1.Backup) (*dpbackup.Request, error) { + request := &dpbackup.Request{ + Backup: backup.DeepCopy(), + RequestCtx: reqCtx, + Client: r.Client, } - backupPolicy, err := r.getBackupPolicyAndValidate(reqCtx, backup) - if err != nil { - return r.updateStatusIfFailed(reqCtx, backup, err) + if request.Annotations == nil { + request.Annotations = make(map[string]string) } - if err = r.validateLogfileBackupLegitimacy(backup, backupPolicy); err != nil { - return r.updateStatusIfFailed(reqCtx, backup, err) + if request.Labels == nil { + request.Labels = make(map[string]string) } - updateLabels := map[string]string{} - - // TODO: get pod with matching labels to do backup. 
- var targetCluster dataprotectionv1alpha1.TargetCluster - var isStatefulSetKind bool - if backup.Spec.BackupType == dataprotectionv1alpha1.BackupTypeSnapshot { - targetCluster = backupPolicy.Spec.Snapshot.Target - } else { - commonPolicy := backupPolicy.Spec.GetCommonPolicy(backup.Spec.BackupType) - if commonPolicy == nil { - return r.updateStatusIfFailed(reqCtx, backup, intctrlutil.NewBackupNotSupported(string(backup.Spec.BackupType), backupPolicy.Name)) - } - targetCluster = commonPolicy.Target - backupTool, err := getBackupToolByName(reqCtx, r.Client, commonPolicy.BackupToolName) - if err != nil { - return r.updateStatusIfFailed(reqCtx, backup, intctrlutil.NewNotFound("backupTool: %s not found", commonPolicy.BackupToolName)) - } - if err = r.buildBackupStatusForBackupTool(reqCtx, backup, backupPolicy, commonPolicy, backupTool, updateLabels); err != nil { - if errors.Is(err, errBreakReconcile) { - // wait for the PVC to be created - return intctrlutil.Reconciled() - } - return r.updateStatusIfFailed(reqCtx, backup, err) - } - isStatefulSetKind = backupTool.Spec.DeployKind == dataprotectionv1alpha1.DeployKindStatefulSet - } - // clean cached annotations if in NEW phase - backupCopy := backup.DeepCopy() - if backupCopy.Annotations[dataProtectionBackupTargetPodKey] != "" { - delete(backupCopy.Annotations, dataProtectionBackupTargetPodKey) - } - target, err := r.getTargetPod(reqCtx, backupCopy, targetCluster.LabelsSelector.MatchLabels) + backupPolicy, err := getBackupPolicyByName(reqCtx, r.Client, backup.Spec.BackupPolicyName) if err != nil { - return r.updateStatusIfFailed(reqCtx, backup, err) + return nil, err } - cluster := r.getCluster(reqCtx, target) - if hasPatch, err := r.patchBackupObjectMeta(reqCtx, backup, target, cluster, updateLabels); err != nil { - return r.updateStatusIfFailed(reqCtx, backup, err) - } else if hasPatch { - return intctrlutil.Reconciled() + targetPods, err := getTargetPods(reqCtx, r.Client, + 
backup.Annotations[dataProtectionBackupTargetPodKey], backupPolicy) + if err != nil || len(targetPods) == 0 { + return nil, fmt.Errorf("failed to get target pods by backup policy %s/%s", + backupPolicy.Namespace, backupPolicy.Name) } - // clean up failed job if backup type is logfile - if backup.Spec.BackupType == dataprotectionv1alpha1.BackupTypeLogFile { - if err = r.cleanupFailedJob(reqCtx, backup); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } + if len(targetPods) > 1 { + return nil, fmt.Errorf("do not support more than one target pods") } - // update Phase to InProgress/Running - if isStatefulSetKind { - backup.Status.Phase = dataprotectionv1alpha1.BackupRunning - } else { - backup.Status.Phase = dataprotectionv1alpha1.BackupInProgress - } - backup.Status.StartTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} - if backupPolicy.Spec.Retention != nil && backupPolicy.Spec.Retention.TTL != nil { - backup.Status.Expiration = &metav1.Time{ - Time: backup.Status.StartTimestamp.Add(dataprotectionv1alpha1.ToDuration(backupPolicy.Spec.Retention.TTL)), - } + backupMethod := getBackupMethodByName(backup.Spec.BackupMethod, backupPolicy) + if backupMethod == nil { + return nil, intctrlutil.NewNotFound("backupMethod: %s not found", + backup.Spec.BackupMethod) } - if cluster != nil { - backup.Status.SourceCluster = cluster.Name + // backupMethod should specify snapshotVolumes or actionSetName, if we take + // snapshots to back up volumes, the snapshotVolumes should be set to true + // and the actionSetName is not required, if we do not take snapshots to back + // up volumes, the actionSetName is required. 
+ snapshotVolumes := boolptr.IsSetToTrue(backupMethod.SnapshotVolumes) + if !snapshotVolumes && backupMethod.ActionSetName == "" { + return nil, fmt.Errorf("backup method %s should specify snapshotVolumes or actionSetName", backupMethod.Name) } - if err = r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + + // if backup method use volume snapshots to back up, the volume snapshot + // feature should be enabled. + if snapshotVolumes && !dputils.VolumeSnapshotEnabled() { + return nil, fmt.Errorf("current backup method depends on volume snapshot, but volume snapshot is not enabled") } - return intctrlutil.Reconciled() -} -func (r *BackupReconciler) buildBackupStatusForBackupTool(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - backupTool *dataprotectionv1alpha1.BackupTool, - updateLabels map[string]string) error { - if backup.Status.Manifests == nil { - backup.Status.Manifests = &dataprotectionv1alpha1.ManifestsStatus{} - } - if backup.Status.Manifests.BackupTool == nil { - backup.Status.Manifests.BackupTool = &dataprotectionv1alpha1.BackupToolManifestsStatus{} - } - // handle the PVC used in this backup - if backup.Status.PersistentVolumeClaimName == "" { - pvcName, pvName, err := r.handlePersistentVolumeClaim(reqCtx, backup, backupPolicy.Name, commonPolicy, updateLabels) + if backupMethod.ActionSetName != "" { + actionSet, err := getActionSetByName(reqCtx, r.Client, backupMethod.ActionSetName) if err != nil { - return err + return nil, err } - // record volume name - backup.Status.PersistentVolumeClaimName = pvcName - backup.Status.Manifests.BackupTool.VolumeName = pvName - } - // save the backup message for restore - backup.Status.BackupToolName = backupTool.Name - backupDestinationPath := getBackupDestinationPath(backup, 
backupPolicy.Annotations[constant.BackupDataPathPrefixAnnotationKey]) - backup.Status.Manifests.BackupTool.FilePath = backupDestinationPath - - if backupTool.Spec.Physical.IsRelyOnLogfile() { - if backupPolicy.Spec.Schedule.Logfile == nil || !backupPolicy.Spec.Schedule.Logfile.Enable { - return intctrlutil.NewBackupLogfileScheduleDisabled(backupTool.Name) + if actionSet.Spec.BackupType != dpv1alpha1.BackupTypeFull { + return nil, fmt.Errorf("only support backup type Full for actionSet %s", actionSet.Name) } - logfileBackupName := getCreatedCRNameByBackupPolicy(backupPolicy, dataprotectionv1alpha1.BackupTypeLogFile) - backup.Status.Manifests.BackupTool.LogFilePath = getBackupDestinationPath(&dataprotectionv1alpha1.Backup{ - ObjectMeta: metav1.ObjectMeta{Namespace: backup.Namespace, Name: logfileBackupName}, - }, backupPolicy.Annotations[constant.BackupDataPathPrefixAnnotationKey]) + request.ActionSet = actionSet + } - logFilePvcName, _, err := r.handlePersistentVolumeClaim(reqCtx, backup, backupPolicy.Name, backupPolicy.Spec.Logfile, updateLabels) - if err != nil { - return err - } - backup.Status.LogFilePersistentVolumeClaimName = logFilePvcName + request.BackupPolicy = backupPolicy + if err = r.handleBackupRepo(request); err != nil { + return nil, err } - return nil + + request.BackupMethod = backupMethod + request.TargetPods = targetPods + return request, nil } -func (r *BackupReconciler) cleanupFailedJob(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - jobList := batchv1.JobList{} - if err := r.Client.List(reqCtx.Ctx, &jobList, client.InNamespace(backup.Namespace), - client.MatchingLabels{constant.DataProtectionLabelBackupNameKey: backup.Name}); err != nil { - return nil +// handleBackupRepo handles the backup repo, and get the backup repo PVC. If the +// PVC is not present, it will add a special label and wait for the backup repo +// controller to create the PVC. 
+func (r *BackupReconciler) handleBackupRepo(request *dpbackup.Request) error { + repo, err := r.getBackupRepo(request.Ctx, request.Backup, request.BackupPolicy) + if err != nil { + return err } + request.BackupRepo = repo - for _, job := range jobList.Items { - if !containsJobCondition(&job, batchv1.JobFailed) { - continue - } - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &job); err != nil { - return err - } - if controllerutil.ContainsFinalizer(&job, dataProtectionFinalizerName) { - patch := client.MergeFrom(job.DeepCopy()) - controllerutil.RemoveFinalizer(&job, dataProtectionFinalizerName) - if err := r.Patch(reqCtx.Ctx, &job, patch); err != nil { - return err - } - } + pvcName := repo.Status.BackupPVCName + if pvcName == "" { + return dperrors.NewBackupPVCNameIsEmpty(repo.Name, request.Spec.BackupPolicyName) } - return nil -} -func (r *BackupReconciler) handlePersistentVolumeClaim(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicyName string, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - updateLabels map[string]string) (pvcName string, pvName string, err error) { - // check the PVC from the backup repo - pvcName, pvName, err = r.handlePVCByBackupRepo(reqCtx, backup, backupPolicyName, commonPolicy, updateLabels) - if err == nil || !errors.Is(err, errNoDefaultBackupRepo) { - return pvcName, pvName, err + pvc := &corev1.PersistentVolumeClaim{} + pvcKey := client.ObjectKey{Namespace: request.Req.Namespace, Name: pvcName} + if err = r.Client.Get(request.Ctx, pvcKey, pvc); err != nil { + return client.IgnoreNotFound(err) } - // fallback to the legacy PVC field for compatibility - if commonPolicy.PersistentVolumeClaim.Name != nil { - pvcName = *commonPolicy.PersistentVolumeClaim.Name + // backupRepo PVC exists, record the PVC name + if err == nil { + request.BackupRepoPVC = pvc } - pvName, err = r.handlePersistentVolumeClaimLegacy(reqCtx, backup.Spec.BackupType, backupPolicyName, commonPolicy) - 
return pvcName, pvName, err + return nil } -func (r *BackupReconciler) handlePVCByBackupRepo(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicyName string, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - updateLabels map[string]string) (pvcName string, pvName string, err error) { - // check the PVC from backup repo - repo, err := r.getBackupRepo(reqCtx, backup, commonPolicy) +func (r *BackupReconciler) patchBackupStatus( + original *dpv1alpha1.Backup, + request *dpbackup.Request) error { + request.Status.FormatVersion = dpbackup.FormatVersion + request.Status.Path = dpbackup.BuildBackupPath(request.Backup, request.BackupPolicy.Spec.PathPrefix) + request.Status.Target = request.BackupPolicy.Spec.Target + request.Status.BackupMethod = request.BackupMethod + request.Status.PersistentVolumeClaimName = request.BackupRepoPVC.Name + request.Status.BackupRepoName = request.BackupRepo.Name + + // init action status + actions, err := request.BuildActions() if err != nil { - return "", "", err - } - pvcName = repo.Status.BackupPVCName - if pvcName == "" { - err = intctrlutil.NewBackupPVCNameIsEmpty(string(backup.Spec.BackupType), backupPolicyName) - return "", "", err + return err } - pvc := &corev1.PersistentVolumeClaim{} - err = r.Client.Get(reqCtx.Ctx, client.ObjectKey{ - Namespace: reqCtx.Req.Namespace, - Name: pvcName, - }, pvc) - if err != nil && !apierrors.IsNotFound(err) { - // error occurred - return "", "", err + request.Status.Actions = make([]dpv1alpha1.ActionStatus, len(actions)) + for i, act := range actions { + request.Status.Actions[i] = dpv1alpha1.ActionStatus{ + Name: act.GetName(), + Phase: dpv1alpha1.ActionPhaseNew, + ActionType: act.Type(), + } } - if err == nil { - // the PVC is already present, bind the backup to the repo - updateLabels[dataProtectionBackupRepoKey] = repo.Name - return pvcName, pvc.Spec.VolumeName, nil - } - // the PVC is not present - // add a special label and wait for the backup repo 
controller to create the PVC. - // we need to update the object meta immediately, because we are going to break the current reconciliation. - _, err = r.patchBackupObjectLabels(reqCtx, backup, map[string]string{ - dataProtectionBackupRepoKey: repo.Name, - dataProtectionWaitRepoPreparationKey: trueVal, - }) + + // update phase to running + request.Status.Phase = dpv1alpha1.BackupPhaseRunning + request.Status.StartTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} + + duration, err := original.Spec.RetentionPeriod.ToDuration() if err != nil { - return "", "", err + return fmt.Errorf("failed to parse retention period %s, %v", original.Spec.RetentionPeriod, err) + } + if original.Spec.RetentionPeriod != "" { + request.Status.Expiration = &metav1.Time{ + Time: request.Status.StartTimestamp.Add(duration), + } } - return "", "", errBreakReconcile + return r.Client.Status().Patch(request.Ctx, request.Backup, client.MergeFrom(original)) } -// handlePersistentVolumeClaimLegacy handles the persistent volume claim for the backup, the rules are as follows -// - if CreatePolicy is "Never", it will check if the pvc exists. if not existed, then report an error. -// - if CreatePolicy is "IfNotPresent" and the pvc not existed, then create the pvc automatically. 
-func (r *BackupReconciler) handlePersistentVolumeClaimLegacy(reqCtx intctrlutil.RequestCtx, - backupType dataprotectionv1alpha1.BackupType, - backupPolicyName string, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) (string, error) { - pvcConfig := commonPolicy.PersistentVolumeClaim - if pvcConfig.Name == nil || len(*pvcConfig.Name) == 0 { - return "", intctrlutil.NewBackupPVCNameIsEmpty(string(backupType), backupPolicyName) - } - pvc := &corev1.PersistentVolumeClaim{} - if err := r.Client.Get(reqCtx.Ctx, client.ObjectKey{Namespace: reqCtx.Req.Namespace, - Name: *pvcConfig.Name}, pvc); err != nil && !apierrors.IsNotFound(err) { - return "", err - } - if len(pvc.Name) > 0 { - return pvc.Spec.VolumeName, nil - } - if pvcConfig.CreatePolicy == dataprotectionv1alpha1.CreatePVCPolicyNever { - return "", intctrlutil.NewNotFound(`persistent volume claim "%s" not found`, *pvcConfig.Name) - } - if pvcConfig.PersistentVolumeConfigMap != nil && - (pvcConfig.StorageClassName == nil || *pvcConfig.StorageClassName == "") { - // if the storageClassName is empty and the PersistentVolumeConfigMap is not empty, - // create the persistentVolume with the template - if err := r.createPersistentVolumeWithTemplate(reqCtx, backupPolicyName, &pvcConfig); err != nil { - return "", err +// patchBackupObjectMeta patches backup object metaObject include cluster snapshot. 
+func (r *BackupReconciler) patchBackupObjectMeta( + original *dpv1alpha1.Backup, + request *dpbackup.Request) (bool, error) { + targetPod := request.TargetPods[0] + + // get KubeBlocks cluster and set labels and annotations for backup + // TODO(ldm): we should remove this dependency of cluster in the future + cluster := getCluster(request.Ctx, r.Client, targetPod) + if cluster != nil { + if err := setClusterSnapshotAnnotation(request.Backup, cluster); err != nil { + return false, err } + request.Labels[dptypes.DataProtectionLabelClusterUIDKey] = string(cluster.UID) + } + for _, v := range getClusterLabelKeys() { + request.Labels[v] = targetPod.Labels[v] } - return "", r.createPVCWithStorageClassName(reqCtx, backupPolicyName, pvcConfig) + + request.Labels[dataProtectionBackupRepoKey] = request.BackupRepo.Name + request.Labels[constant.AppManagedByLabelKey] = constant.AppName + request.Labels[dataProtectionLabelBackupTypeKey] = request.GetBackupType() + + // if the backupRepo PVC is not present, add a special label and wait for the + // backup repo controller to create the PVC. + wait := false + if request.BackupRepoPVC == nil { + request.Labels[dataProtectionWaitRepoPreparationKey] = trueVal + wait = true + } + + // set annotations + request.Annotations[dataProtectionBackupTargetPodKey] = targetPod.Name + + // set finalizer + controllerutil.AddFinalizer(request.Backup, dptypes.DataProtectionFinalizerName) + + if reflect.DeepEqual(original.ObjectMeta, request.ObjectMeta) { + return wait, nil + } + + return true, r.Client.Patch(request.Ctx, request.Backup, client.MergeFrom(original)) } // getBackupRepo returns the backup repo specified by the backup object or the policy. // if no backup repo specified, it will return the default one. 
-func (r *BackupReconciler) getBackupRepo( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) (*dataprotectionv1alpha1.BackupRepo, error) { +func (r *BackupReconciler) getBackupRepo(ctx context.Context, + backup *dpv1alpha1.Backup, + backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupRepo, error) { // use the specified backup repo var repoName string if val := backup.Labels[dataProtectionBackupRepoKey]; val != "" { repoName = val - } else if commonPolicy.BackupRepoName != nil && *commonPolicy.BackupRepoName != "" { - repoName = *commonPolicy.BackupRepoName + } else if backupPolicy.Spec.BackupRepoName != nil && *backupPolicy.Spec.BackupRepoName != "" { + repoName = *backupPolicy.Spec.BackupRepoName } if repoName != "" { - repo := &dataprotectionv1alpha1.BackupRepo{} - err := r.Client.Get(reqCtx.Ctx, client.ObjectKey{Name: repoName}, repo) - if err != nil { + repo := &dpv1alpha1.BackupRepo{} + if err := r.Client.Get(ctx, client.ObjectKey{Name: repoName}, repo); err != nil { if apierrors.IsNotFound(err) { return nil, intctrlutil.NewNotFound("backup repo %s not found", repoName) } @@ -604,1399 +433,195 @@ func (r *BackupReconciler) getBackupRepo( return repo, nil } // fallback to use the default repo - return getDefaultBackupRepo(reqCtx.Ctx, r.Client) -} - -// createPVCWithStorageClassName creates the persistent volume claim with the storageClassName. 
-func (r *BackupReconciler) createPVCWithStorageClassName(reqCtx intctrlutil.RequestCtx, - backupPolicyName string, - pvcConfig dataprotectionv1alpha1.PersistentVolumeClaim) error { - pvc := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: *pvcConfig.Name, - Namespace: reqCtx.Req.Namespace, - Annotations: buildAutoCreationAnnotations(backupPolicyName), - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pvcConfig.StorageClassName, - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceStorage: pvcConfig.InitCapacity, - }, - }, - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteMany, - }, - }, - } - err := r.Client.Create(reqCtx.Ctx, pvc) - return client.IgnoreAlreadyExists(err) + return getDefaultBackupRepo(ctx, r.Client) } -// createPersistentVolumeWithTemplate creates the persistent volume with the template. -func (r *BackupReconciler) createPersistentVolumeWithTemplate(reqCtx intctrlutil.RequestCtx, - backupPolicyName string, - pvcConfig *dataprotectionv1alpha1.PersistentVolumeClaim) error { - pvConfig := pvcConfig.PersistentVolumeConfigMap - configMap := &corev1.ConfigMap{} - if err := r.Client.Get(reqCtx.Ctx, client.ObjectKey{Namespace: pvConfig.Namespace, - Name: pvConfig.Name}, configMap); err != nil { - return err - } - pvTemplate := configMap.Data[persistentVolumeTemplateKey] - if pvTemplate == "" { - return intctrlutil.NewBackupPVTemplateNotFound(pvConfig.Namespace, pvConfig.Name) - } - pvName := fmt.Sprintf("%s-%s", *pvcConfig.Name, reqCtx.Req.Namespace) - pvTemplate = strings.ReplaceAll(pvTemplate, "$(GENERATE_NAME)", pvName) - pv := &corev1.PersistentVolume{} - if err := yaml.Unmarshal([]byte(pvTemplate), pv); err != nil { - return err - } - pv.Name = pvName - pv.Spec.ClaimRef = &corev1.ObjectReference{ - Namespace: reqCtx.Req.Namespace, - Name: *pvcConfig.Name, - } - pv.Annotations = buildAutoCreationAnnotations(backupPolicyName) - // 
set the storageClassName to empty for the persistentVolumeClaim to avoid the dynamic provisioning - emptyStorageClassName := "" - pvcConfig.StorageClassName = &emptyStorageClassName - controllerutil.AddFinalizer(pv, dataProtectionFinalizerName) - return r.Client.Create(reqCtx.Ctx, pv) -} - -func (r *BackupReconciler) doInProgressPhaseAction( +func (r *BackupReconciler) handleRunningPhase( reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) (ctrl.Result, error) { - backupPolicy, err := r.getBackupPolicyAndValidate(reqCtx, backup) + backup *dpv1alpha1.Backup) (ctrl.Result, error) { + request, err := r.prepareBackupRequest(reqCtx, backup) if err != nil { - return r.updateStatusIfFailed(reqCtx, backup, err) - } - backupDestinationPath := getBackupDestinationPath(backup, backupPolicy.Annotations[constant.BackupDataPathPrefixAnnotationKey]) - patch := client.MergeFrom(backup.DeepCopy()) - var res *ctrl.Result - switch backup.Spec.BackupType { - case dataprotectionv1alpha1.BackupTypeSnapshot: - res, err = r.doSnapshotInProgressPhaseAction(reqCtx, backup, backupPolicy, backupDestinationPath) - default: - res, err = r.doBaseBackupInProgressPhaseAction(reqCtx, backup, backupPolicy, backupDestinationPath) + return r.updateStatusIfFailed(reqCtx, backup.DeepCopy(), backup, err) } - if res != nil { - return *res, err - } else if err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - // finally, update backup status - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedBackup", "Completed backup.") - if backup.Status.CompletionTimestamp != nil { - // round the duration to a multiple of seconds. 
- duration := backup.Status.CompletionTimestamp.Sub(backup.Status.StartTimestamp.Time).Round(time.Second) - backup.Status.Duration = &metav1.Duration{Duration: duration} - } - if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - return intctrlutil.Reconciled() -} - -// doSnapshotInProgressPhaseAction handles for snapshot backup during in progress. -func (r *BackupReconciler) doSnapshotInProgressPhaseAction(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - backupDestinationPath string) (*ctrl.Result, error) { - // 1. create and ensure pre-command job completed - // 2. create and ensure volume snapshot ready - // 3. create and ensure post-command job completed - snapshotSpec := backupPolicy.Spec.Snapshot - isOK, err := r.createPreCommandJobAndEnsure(reqCtx, backup, snapshotSpec) + // there are actions not completed, continue to handle following actions + actions, err := request.BuildActions() if err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) - } - if !isOK { - return intctrlutil.ResultToP(intctrlutil.RequeueAfter(reconcileInterval, reqCtx.Log, "")) - } - if err = r.createUpdatesJobs(reqCtx, backup, nil, &snapshotSpec.BasePolicy, backupDestinationPath, dataprotectionv1alpha1.PRE); err != nil { - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedPreUpdatesJob", err.Error()) - } - if err = r.createVolumeSnapshot(reqCtx, backup, backupPolicy.Spec.Snapshot); err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) + return r.updateStatusIfFailed(reqCtx, backup, request.Backup, err) } - key := types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: backup.Name} - isOK, snapshotTime, err := r.ensureVolumeSnapshotReady(key) - if err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) + 
actionCtx := action.Context{ + Ctx: reqCtx.Ctx, + Client: r.Client, + Recorder: r.Recorder, + Scheme: r.Scheme, + RestClientConfig: r.RestConfig, } - if !isOK { - return intctrlutil.ResultToP(intctrlutil.Reconciled()) - } - msg := fmt.Sprintf("Created volumeSnapshot %s ready.", key.Name) - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedVolumeSnapshot", msg) - isOK, err = r.createPostCommandJobAndEnsure(reqCtx, backup, snapshotSpec) - if err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) - } - if !isOK { - return intctrlutil.ResultToP(intctrlutil.RequeueAfter(reconcileInterval, reqCtx.Log, "")) - } + // check all actions status, if any action failed, update backup status to failed + // if all actions completed, update backup status to completed, otherwise, + // continue to handle following actions. + for i, act := range actions { + status, err := act.Execute(actionCtx) + if err != nil { + return r.updateStatusIfFailed(reqCtx, backup, request.Backup, err) + } + request.Status.Actions[i] = mergeActionStatus(&request.Status.Actions[i], status) - // Failure MetadataCollectionJob does not affect the backup status. 
- if err = r.createUpdatesJobs(reqCtx, backup, nil, &snapshotSpec.BasePolicy, backupDestinationPath, dataprotectionv1alpha1.POST); err != nil { - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedPostUpdatesJob", err.Error()) + switch status.Phase { + case dpv1alpha1.ActionPhaseCompleted: + updateBackupStatusByActionStatus(&request.Status) + continue + case dpv1alpha1.ActionPhaseFailed: + return r.updateStatusIfFailed(reqCtx, backup, request.Backup, + fmt.Errorf("action %s failed, %s", act.GetName(), status.FailureReason)) + case dpv1alpha1.ActionPhaseRunning: + // update status + if err = r.Client.Status().Patch(reqCtx.Ctx, request.Backup, client.MergeFrom(backup)); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + return intctrlutil.Reconciled() + } } - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted - backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} - backup.Status.Manifests = &dataprotectionv1alpha1.ManifestsStatus{ - BackupLog: &dataprotectionv1alpha1.BackupLogStatus{ - StartTime: snapshotTime, - StopTime: snapshotTime, - }, + // all actions completed, update backup status to completed + request.Status.Phase = dpv1alpha1.BackupPhaseCompleted + request.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} + if !request.Status.StartTimestamp.IsZero() { + // round the duration to a multiple of seconds. 
+ duration := request.Status.CompletionTimestamp.Sub(request.Status.StartTimestamp.Time).Round(time.Second) + request.Status.Duration = &metav1.Duration{Duration: duration} } - snap := &snapshotv1.VolumeSnapshot{} - exists, _ := r.snapshotCli.CheckResourceExists(key, snap) - if exists { - backup.Status.TotalSize = snap.Status.RestoreSize.String() + r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedBackup", "Completed backup") + if err = r.Client.Status().Patch(reqCtx.Ctx, request.Backup, client.MergeFrom(backup)); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - return nil, nil + return intctrlutil.Reconciled() } -// doBaseBackupInProgressPhaseAction handles for base backup during in progress. -func (r *BackupReconciler) doBaseBackupInProgressPhaseAction(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - backupDestinationPath string) (*ctrl.Result, error) { - // 1. create and ensure backup tool job finished - // 2. 
get job phase and update - commonPolicy := backupPolicy.Spec.GetCommonPolicy(backup.Spec.BackupType) - if commonPolicy == nil { - // TODO: add error type - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, fmt.Errorf("not found the %s policy", backup.Spec.BackupType))) - } - // createUpdatesJobs should not affect the backup status, just need to record events when the run fails - if err := r.createUpdatesJobs(reqCtx, backup, commonPolicy, &commonPolicy.BasePolicy, backupDestinationPath, dataprotectionv1alpha1.PRE); err != nil { - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedPreUpdatesJob", err.Error()) - } - if err := r.createBackupToolJob(reqCtx, backup, backupPolicy, commonPolicy, backupDestinationPath); err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) - } - key := types.NamespacedName{Namespace: backup.Namespace, Name: backup.Name} - isOK, err := r.ensureBatchV1JobCompleted(reqCtx, key) - if err != nil { - return intctrlutil.ResultToP(r.updateStatusIfFailed(reqCtx, backup, err)) +func mergeActionStatus(original, new *dpv1alpha1.ActionStatus) dpv1alpha1.ActionStatus { + as := new.DeepCopy() + if original.StartTimestamp != nil { + as.StartTimestamp = original.StartTimestamp } - if !isOK { - return intctrlutil.ResultToP(intctrlutil.Reconciled()) - } - // createUpdatesJobs should not affect the backup status, just need to record events when the run fails - if err = r.createUpdatesJobs(reqCtx, backup, commonPolicy, &commonPolicy.BasePolicy, backupDestinationPath, dataprotectionv1alpha1.POST); err != nil { - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatedPostUpdatesJob", err.Error()) - } - // updates Phase directly to Completed because `ensureBatchV1JobCompleted` has checked job failed - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted - backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} + return *as +} - if backup.Spec.BackupType == 
dataprotectionv1alpha1.BackupTypeLogFile { - if backup.Status.Manifests != nil && - backup.Status.Manifests.BackupLog != nil && - backup.Status.Manifests.BackupLog.StartTime == nil { - backup.Status.Manifests.BackupLog.StartTime = backup.Status.Manifests.BackupLog.StopTime +func updateBackupStatusByActionStatus(backupStatus *dpv1alpha1.BackupStatus) { + for _, act := range backupStatus.Actions { + if act.TotalSize != "" && backupStatus.TotalSize == "" { + backupStatus.TotalSize = act.TotalSize + } + if act.TimeRange != nil && backupStatus.TimeRange == nil { + backupStatus.TimeRange = act.TimeRange } } - return nil, nil } -func (r *BackupReconciler) doInRunningPhaseAction( +// handleCompletedPhase handles the backup object in completed phase. +// It will delete the reference workloads. +func (r *BackupReconciler) handleCompletedPhase( reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) error { - backupPolicy, isCompleted, err := r.checkBackupIsCompletedDuringRunning(reqCtx, backup) - if err != nil { - return err - } else if isCompleted { - return nil - } - commonPolicy := backupPolicy.Spec.GetCommonPolicy(backup.Spec.BackupType) - if commonPolicy == nil { - return fmt.Errorf(`can not find spec.%s in BackupPolicy "%s"`, strings.ToLower(string(backup.Spec.BackupType)), backupPolicy.Name) - } - // reconcile StatefulSet - sts := &appsv1.StatefulSet{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, types.NamespacedName{ - Namespace: backup.Namespace, - Name: backup.Name, - }, sts) - if err != nil { - return err - } - statefulSetSpec, err := r.buildStatefulSpec(reqCtx, backup, backupPolicy, commonPolicy) - if err != nil { - return err - } - // if not exists, create the statefulSet - if !exists { - return r.createBackupStatefulSet(reqCtx, backup, statefulSetSpec) - } - sts.Spec.Template = statefulSetSpec.Template - // update the statefulSet - if err = r.Update(reqCtx.Ctx, sts); err != nil { - return err + backup 
*dpv1alpha1.Backup) (ctrl.Result, error) { + if err := r.deleteExternalResources(reqCtx, backup); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - // if available replicas not changed, return - if backup.Status.AvailableReplicas != nil && *backup.Status.AvailableReplicas == sts.Status.AvailableReplicas { - return nil + return intctrlutil.Reconciled() +} + +func (r *BackupReconciler) updateStatusIfFailed( + reqCtx intctrlutil.RequestCtx, + original *dpv1alpha1.Backup, + backup *dpv1alpha1.Backup, + err error) (ctrl.Result, error) { + sendWarningEventForError(r.Recorder, backup, err) + backup.Status.Phase = dpv1alpha1.BackupPhaseFailed + backup.Status.FailureReason = err.Error() + if errUpdate := r.Client.Status().Patch(reqCtx.Ctx, backup, client.MergeFrom(original)); errUpdate != nil { + return intctrlutil.CheckedRequeueWithError(errUpdate, reqCtx.Log, "") } - patch := client.MergeFrom(backup.DeepCopy()) - backup.Status.AvailableReplicas = &sts.Status.AvailableReplicas - return r.Status().Patch(reqCtx.Ctx, backup, patch) + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } -// checkBackupIsCompletedDuringRunning checks if backup is completed during it is running. -// it returns ture, if logfile schedule is disabled or cluster is deleted. -func (r *BackupReconciler) checkBackupIsCompletedDuringRunning(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) (*dataprotectionv1alpha1.BackupPolicy, bool, error) { - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Spec.BackupPolicyName, - }, backupPolicy) - if err != nil { - return backupPolicy, false, err +// deleteExternalJobs deletes the external jobs. 
+func (r *BackupReconciler) deleteExternalJobs(reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) error { + jobs := &batchv1.JobList{} + if err := r.Client.List(reqCtx.Ctx, jobs, + client.InNamespace(backup.Namespace), + client.MatchingLabels(dpbackup.BuildBackupWorkloadLabels(backup))); err != nil { + return client.IgnoreNotFound(err) } - if exists { - if err = backup.Spec.Validate(backupPolicy); err != nil { - return backupPolicy, false, err + + deleteJob := func(job *batchv1.Job) error { + if err := dputils.RemoveDataProtectionFinalizer(reqCtx.Ctx, r.Client, job); err != nil { + return err } - clusterName := backup.Labels[constant.AppInstanceLabelKey] - targetClusterExists := true - if clusterName != "" { - cluster := &appsv1alpha1.Cluster{} - var err error - targetClusterExists, err = intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, types.NamespacedName{Name: clusterName, Namespace: backup.Namespace}, cluster) - if err != nil { - return backupPolicy, false, err - } + if !job.DeletionTimestamp.IsZero() { + return nil } - - schedulePolicy := backupPolicy.Spec.GetCommonSchedulePolicy(backup.Spec.BackupType) - if schedulePolicy.Enable && targetClusterExists { - return backupPolicy, false, nil + reqCtx.Log.V(1).Info("delete job", "job", job) + if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, job); err != nil { + return err } + return nil } - patch := client.MergeFrom(backup.DeepCopy()) - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted - backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} - if !backup.Status.StartTimestamp.IsZero() { - // round the duration to a multiple of seconds. 
- duration := backup.Status.CompletionTimestamp.Sub(backup.Status.StartTimestamp.Time).Round(time.Second) - backup.Status.Duration = &metav1.Duration{Duration: duration} + + for i := range jobs.Items { + if err := deleteJob(&jobs.Items[i]); err != nil { + return err + } } - return backupPolicy, true, r.Client.Status().Patch(reqCtx.Ctx, backup, patch) + return nil } -func (r *BackupReconciler) createBackupStatefulSet(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - stsSpec *appsv1.StatefulSetSpec) error { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: backup.Name, - Namespace: backup.Namespace, - Labels: buildBackupWorkloadsLabels(backup), - }, - Spec: *stsSpec, +func (r *BackupReconciler) deleteVolumeSnapshots(reqCtx intctrlutil.RequestCtx, + backup *dpv1alpha1.Backup) error { + deleter := &dpbackup.Deleter{ + RequestCtx: reqCtx, + Client: r.Client, } - controllerutil.AddFinalizer(sts, dataProtectionFinalizerName) - if err := controllerutil.SetControllerReference(backup, sts, r.Scheme); err != nil { - return err - } - return r.Client.Create(reqCtx.Ctx, sts) + return deleter.DeleteVolumeSnapshots(backup) } -func (r *BackupReconciler) buildManifestsUpdaterContainer(backup *dataprotectionv1alpha1.Backup, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - backupDestinationPath string) (corev1.Container, error) { - container := corev1.Container{} - cueFS, _ := debme.FS(cueTemplates, "cue") - cueTpl, err := intctrlutil.NewCUETplFromBytes(cueFS.ReadFile("manifests_updater.cue")) - if err != nil { - return container, err - } - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - optionsBytes, err := json.Marshal(map[string]string{ - "backupName": backup.Name, - "namespace": backup.Namespace, - "image": viper.GetString(constant.KBToolsImage), - "containerName": manifestsUpdaterContainerName, - "imagePullPolicy": viper.GetString(constant.KBImagePullPolicy), - }) - if err != nil { - return container, err - } - if err = 
cueValue.Fill("options", optionsBytes); err != nil { - return container, err - } - containerBytes, err := cueValue.Lookup("container") - if err != nil { - return container, err +// deleteExternalStatefulSet deletes the external statefulSet. +func (r *BackupReconciler) deleteExternalStatefulSet(reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) error { + key := client.ObjectKey{ + Namespace: backup.Namespace, + Name: backup.Name, } - if err = json.Unmarshal(containerBytes, &container); err != nil { - return container, err + sts := &appsv1.StatefulSet{} + if err := r.Client.Get(reqCtx.Ctx, key, sts); err != nil { + return client.IgnoreNotFound(err) + } else if !model.IsOwnerOf(backup, sts) { + return nil } - container.VolumeMounts = []corev1.VolumeMount{ - {Name: fmt.Sprintf("backup-%s", backup.Status.PersistentVolumeClaimName), MountPath: backupPathBase}, + + patch := client.MergeFrom(sts.DeepCopy()) + controllerutil.RemoveFinalizer(sts, dptypes.DataProtectionFinalizerName) + if err := r.Client.Patch(reqCtx.Ctx, sts, patch); err != nil { + return err } - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - container.Env = []corev1.EnvVar{ - {Name: constant.DPBackupInfoFile, Value: buildBackupInfoENV(backupDestinationPath)}, + + if !sts.DeletionTimestamp.IsZero() { + return nil } - return container, nil + + reqCtx.Log.V(1).Info("delete statefulSet", "statefulSet", sts) + return intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, sts) } -func (r *BackupReconciler) buildStatefulSpec(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) (*appsv1.StatefulSetSpec, error) { - backupDestinationPath := getBackupDestinationPath(backup, backupPolicy.Annotations[constant.BackupDataPathPrefixAnnotationKey]) - toolPodSpec, err := r.buildBackupToolPodSpec(reqCtx, backup, backupPolicy, commonPolicy, backupDestinationPath) - 
toolPodSpec.RestartPolicy = corev1.RestartPolicyAlways - if err != nil { - return nil, err - } - // build the manifests updater container for backup.status.manifests - manifestsUpdaterContainer, err := r.buildManifestsUpdaterContainer(backup, commonPolicy, backupDestinationPath) - if err != nil { - return nil, err - } - // build ARCHIVE_INTERVAL env - schedulePolicy := backupPolicy.Spec.GetCommonSchedulePolicy(backup.Spec.BackupType) - interval := getIntervalSecondsForLogfile(backup.Spec.BackupType, schedulePolicy.CronExpression) - if interval != "" { - toolPodSpec.Containers[0].Env = append(toolPodSpec.Containers[0].Env, corev1.EnvVar{ - Name: constant.DPArchiveInterval, - Value: interval, - }) - } - target, _ := r.getTargetPod(reqCtx, backup, commonPolicy.Target.LabelsSelector.MatchLabels) - if target != nil && target.Spec.ServiceAccountName != "" { - toolPodSpec.Containers = append(toolPodSpec.Containers, manifestsUpdaterContainer) - toolPodSpec.ServiceAccountName = target.Spec.ServiceAccountName - } - backupLabels := buildBackupWorkloadsLabels(backup) - defaultReplicas := int32(1) - return &appsv1.StatefulSetSpec{ - Replicas: &defaultReplicas, - Selector: &metav1.LabelSelector{ - MatchLabels: backupLabels, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: backupLabels, - }, - Spec: toolPodSpec, - }, - }, nil -} - -func (r *BackupReconciler) doCompletedPhaseAction( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup) (ctrl.Result, error) { - - if err := r.deleteReferenceBatchV1Jobs(reqCtx, backup); err != nil && !apierrors.IsNotFound(err) { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - if err := r.deleteReferenceStatefulSet(reqCtx, backup); err != nil && !apierrors.IsNotFound(err) { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - return intctrlutil.Reconciled() -} - -func (r *BackupReconciler) updateStatusIfFailed(reqCtx intctrlutil.RequestCtx, - backup 
*dataprotectionv1alpha1.Backup, err error) (ctrl.Result, error) { - patch := client.MergeFrom(backup.DeepCopy()) - sendWarningEventForError(r.Recorder, backup, err) - backup.Status.Phase = dataprotectionv1alpha1.BackupFailed - backup.Status.FailureReason = err.Error() - if errUpdate := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); errUpdate != nil { - return intctrlutil.CheckedRequeueWithError(errUpdate, reqCtx.Log, "") - } - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") -} - -// getCluster gets the cluster and will ignore the error. -func (r *BackupReconciler) getCluster( - reqCtx intctrlutil.RequestCtx, - targetPod *corev1.Pod) *appsv1alpha1.Cluster { - clusterName := targetPod.Labels[constant.AppInstanceLabelKey] - if len(clusterName) == 0 { - return nil - } - cluster := &appsv1alpha1.Cluster{} - if err := r.Client.Get(reqCtx.Ctx, types.NamespacedName{ - Namespace: targetPod.Namespace, - Name: clusterName, - }, cluster); err != nil { - // should not affect the backup status - return nil - } - return cluster -} - -// patchBackupObjectLabels add missed labels to the backup object. -func (r *BackupReconciler) patchBackupObjectLabels( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - labels map[string]string) (bool, error) { - oldBackup := backup.DeepCopy() - if backup.Labels == nil { - backup.Labels = make(map[string]string) - } - for k, v := range labels { - backup.Labels[k] = v - } - if reflect.DeepEqual(oldBackup.ObjectMeta, backup.ObjectMeta) { - return false, nil - } - return true, r.Client.Patch(reqCtx.Ctx, backup, client.MergeFrom(oldBackup)) -} - -// patchBackupObjectMeta patches backup object metaObject include cluster snapshot. 
-func (r *BackupReconciler) patchBackupObjectMeta( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - targetPod *corev1.Pod, - cluster *appsv1alpha1.Cluster, - updateLabels map[string]string) (bool, error) { - if backup.Labels == nil { - backup.Labels = make(map[string]string) - } - oldBackup := backup.DeepCopy() - if cluster != nil { - if err := r.setClusterSnapshotAnnotation(backup, cluster); err != nil { - return false, err - } - backup.Labels[constant.DataProtectionLabelClusterUIDKey] = string(cluster.UID) - } - for _, v := range getClusterLabelKeys() { - backup.Labels[v] = targetPod.Labels[v] - } - backup.Labels[constant.AppManagedByLabelKey] = constant.AppName - backup.Labels[dataProtectionLabelBackupTypeKey] = string(backup.Spec.BackupType) - for k, v := range updateLabels { - backup.Labels[k] = v - } - if backup.Annotations == nil { - backup.Annotations = make(map[string]string) - } - backup.Annotations[dataProtectionBackupTargetPodKey] = targetPod.Name - controllerutil.AddFinalizer(backup, dataProtectionFinalizerName) - if reflect.DeepEqual(oldBackup.ObjectMeta, backup.ObjectMeta) { - return false, nil - } - return true, r.Client.Patch(reqCtx.Ctx, backup, client.MergeFrom(oldBackup)) -} - -func (r *BackupReconciler) createPreCommandJobAndEnsure(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy) (bool, error) { - - emptyCmd, err := r.ensureEmptyHooksCommand(snapshotPolicy, true) - if err != nil { - return false, err - } - // if undefined commands, skip create job. 
- if emptyCmd { - return true, err - } - - mgrNS := viper.GetString(constant.CfgKeyCtrlrMgrNS) - key := types.NamespacedName{Namespace: mgrNS, Name: generateUniqueJobName(backup, "hook-pre")} - if err := r.createHooksCommandJob(reqCtx, backup, snapshotPolicy, key, true); err != nil { - return false, err - } - return r.ensureBatchV1JobCompleted(reqCtx, key) -} - -func (r *BackupReconciler) createPostCommandJobAndEnsure(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy) (bool, error) { - - emptyCmd, err := r.ensureEmptyHooksCommand(snapshotPolicy, false) - if err != nil { - return false, err - } - // if undefined commands, skip create job. - if emptyCmd { - return true, err - } - - mgrNS := viper.GetString(constant.CfgKeyCtrlrMgrNS) - key := types.NamespacedName{Namespace: mgrNS, Name: generateUniqueJobName(backup, "hook-post")} - if err = r.createHooksCommandJob(reqCtx, backup, snapshotPolicy, key, false); err != nil { - return false, err - } - return r.ensureBatchV1JobCompleted(reqCtx, key) -} - -func (r *BackupReconciler) ensureBatchV1JobCompleted( - reqCtx intctrlutil.RequestCtx, key types.NamespacedName) (bool, error) { - job := &batchv1.Job{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, key, job) - if err != nil { - return false, err - } - if exists { - if containsJobCondition(job, batchv1.JobComplete) { - return true, nil - } - if containsJobCondition(job, batchv1.JobFailed) { - return false, intctrlutil.NewBackupJobFailed(job.Name) - } - } - return false, nil -} - -func (r *BackupReconciler) createVolumeSnapshot( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy) error { - - snap := &snapshotv1.VolumeSnapshot{} - exists, err := r.snapshotCli.CheckResourceExists(reqCtx.Req.NamespacedName, snap) - if err != nil { - return err - } - if exists { - // find resource object, skip 
created. - return nil - } - - // get backup policy - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - backupPolicyNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Spec.BackupPolicyName, - } - - if err := r.Get(reqCtx.Ctx, backupPolicyNameSpaceName, backupPolicy); err != nil { - reqCtx.Log.Error(err, "Unable to get backupPolicy for backup.", "backupPolicy", backupPolicyNameSpaceName) +// deleteExternalResources deletes the external workloads that execute backup. +// Currently, it only supports two types of workloads: statefulSet and job. +func (r *BackupReconciler) deleteExternalResources( + reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) error { + if err := r.deleteExternalStatefulSet(reqCtx, backup); err != nil { return err } - - targetPVCs, err := r.getTargetPVCs(reqCtx, backup, snapshotPolicy.Target.LabelsSelector.MatchLabels) - if err != nil { - return err - } - for _, target := range targetPVCs { - snapshotName := backup.Name - vsc := snapshotv1.VolumeSnapshotClass{} - if target.Spec.StorageClassName != nil { - if err = r.getVolumeSnapshotClassOrCreate(reqCtx.Ctx, *target.Spec.StorageClassName, &vsc); err != nil { - return err - } - } - labels := buildBackupWorkloadsLabels(backup) - labels[constant.VolumeTypeLabelKey] = target.Labels[constant.VolumeTypeLabelKey] - if target.Labels[constant.VolumeTypeLabelKey] == string(appsv1alpha1.VolumeTypeLog) { - snapshotName += "-log" - } - snap = &snapshotv1.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: reqCtx.Req.Namespace, - Name: snapshotName, - Labels: labels, - }, - Spec: snapshotv1.VolumeSnapshotSpec{ - Source: snapshotv1.VolumeSnapshotSource{ - PersistentVolumeClaimName: &target.Name, - }, - VolumeSnapshotClassName: &vsc.Name, - }, - } - - controllerutil.AddFinalizer(snap, dataProtectionFinalizerName) - if err = controllerutil.SetControllerReference(backup, snap, r.Scheme); err != nil { - return err - } - - reqCtx.Log.V(1).Info("create a 
volumeSnapshot from backup", "snapshot", snap.Name) - if err = r.snapshotCli.Create(snap); err != nil && !apierrors.IsAlreadyExists(err) { - return err - } - } - msg := fmt.Sprintf("Waiting for the volume snapshot %s creation to complete in backup.", snap.Name) - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatingVolumeSnapshot", msg) - return nil -} - -func (r *BackupReconciler) getVolumeSnapshotClassOrCreate(ctx context.Context, storageClassName string, vsc *snapshotv1.VolumeSnapshotClass) error { - storageClassObj := storagev1.StorageClass{} - if err := r.Client.Get(ctx, types.NamespacedName{Name: storageClassName}, &storageClassObj); err != nil { - // ignore if not found storage class, use the default volume snapshot class - return client.IgnoreNotFound(err) - } - vscList := snapshotv1.VolumeSnapshotClassList{} - if err := r.snapshotCli.List(&vscList); err != nil { - return err - } - for _, item := range vscList.Items { - if item.Driver == storageClassObj.Provisioner { - *vsc = item - return nil - } - } - // not found matched volume snapshot class, create one - vscName := fmt.Sprintf("vsc-%s-%s", storageClassName, storageClassObj.UID[:8]) - newVSC := ctrlbuilder.BuildVolumeSnapshotClass(vscName, storageClassObj.Provisioner) - if err := r.snapshotCli.Create(newVSC); err != nil { - return err - } - *vsc = *newVSC - return nil -} - -func (r *BackupReconciler) ensureVolumeSnapshotReady( - key types.NamespacedName) (bool, *metav1.Time, error) { - snap := &snapshotv1.VolumeSnapshot{} - // not found, continue the creation process - exists, err := r.snapshotCli.CheckResourceExists(key, snap) - if err != nil { - return false, nil, err - } - if exists && snap.Status != nil { - // check if snapshot status throws an error, e.g. 
csi does not support volume snapshot - if isVolumeSnapshotConfigError(snap) { - return false, nil, errors.New(*snap.Status.Error.Message) - } - if snap.Status.ReadyToUse != nil && *snap.Status.ReadyToUse { - return true, snap.Status.CreationTime, nil - } - } - return false, nil, nil -} - -func (r *BackupReconciler) createUpdatesJobs(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - basePolicy *dataprotectionv1alpha1.BasePolicy, - backupDestinationPath string, - stage dataprotectionv1alpha1.BackupStatusUpdateStage) error { - // get backup policy - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - backupPolicyNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Spec.BackupPolicyName, - } - if err := r.Get(reqCtx.Ctx, backupPolicyNameSpaceName, backupPolicy); err != nil { - reqCtx.Log.V(1).Error(err, "Unable to get backupPolicy for backup.", "backupPolicy", backupPolicyNameSpaceName) - return err - } - for index, update := range basePolicy.BackupStatusUpdates { - if update.UpdateStage != stage { - continue - } - if err := r.createMetadataCollectionJob(reqCtx, backup, commonPolicy, basePolicy, backupDestinationPath, update, index); err != nil { - return err - } - } - return nil -} - -func (r *BackupReconciler) createMetadataCollectionJob(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - basePolicy *dataprotectionv1alpha1.BasePolicy, - backupDestinationPath string, - updateInfo dataprotectionv1alpha1.BackupStatusUpdate, - index int) error { - jobNamespace := viper.GetString(constant.CfgKeyCtrlrMgrNS) - // if specified to use the service account of target pod, the namespace should be the namespace of backup. 
- if updateInfo.UseTargetPodServiceAccount { - jobNamespace = backup.Namespace - } - key := types.NamespacedName{Namespace: jobNamespace, Name: generateUniqueJobName(backup, fmt.Sprintf("status-%d-%s", index, string(updateInfo.UpdateStage)))} - job := &batchv1.Job{} - // check if job is created - if exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, key, job); err != nil { - return err - } else if exists { - return nil - } - - // build job and create - jobPodSpec, err := r.buildMetadataCollectionPodSpec(reqCtx, backup, commonPolicy, basePolicy, backupDestinationPath, updateInfo) - if err != nil { - return err - } - job = ctrlbuilder.BuildBackupManifestsJob(key, backup, &jobPodSpec) - msg := fmt.Sprintf("creating job %s", key.Name) - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatingJob-"+key.Name, msg) - return client.IgnoreAlreadyExists(r.Client.Create(reqCtx.Ctx, job)) -} - -func (r *BackupReconciler) createDeleteBackupFileJob( - reqCtx intctrlutil.RequestCtx, - jobKey types.NamespacedName, - backup *dataprotectionv1alpha1.Backup, - backupPVCName string, - backupFilePath string) error { - - // make sure the path has a leading slash - if !strings.HasPrefix(backupFilePath, "/") { - backupFilePath = "/" + backupFilePath - } - - // this script first deletes the directory where the backup is located (including files - // in the directory), and then traverses up the path level by level to clean up empty directories. - deleteScript := fmt.Sprintf(` - backupPathBase=%s; - targetPath="${backupPathBase}%s"; - - echo "removing backup files in ${targetPath}"; - rm -rf "${targetPath}"; - - absBackupPathBase=$(realpath "${backupPathBase}"); - curr=$(realpath "${targetPath}"); - while true; do - parent=$(dirname "${curr}"); - if [ "${parent}" == "${absBackupPathBase}" ]; then - echo "reach backupPathBase ${backupPathBase}, done"; - break; - fi; - if [ ! 
"$(ls -A "${parent}")" ]; then - echo "${parent} is empty, removing it..."; - rmdir "${parent}"; - else - echo "${parent} is not empty, done"; - break; - fi; - curr="${parent}"; - done - `, backupPathBase, backupFilePath) - - // build container - container := corev1.Container{} - container.Name = backup.Name - container.Command = []string{"sh", "-c"} - container.Args = []string{deleteScript} - container.Image = viper.GetString(constant.KBToolsImage) - container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) - - allowPrivilegeEscalation := false - runAsUser := int64(0) - container.SecurityContext = &corev1.SecurityContext{ - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - RunAsUser: &runAsUser, - } - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - - // build pod - podSpec := corev1.PodSpec{ - Containers: []corev1.Container{container}, - RestartPolicy: corev1.RestartPolicyNever, - } - - // mount the backup volume to the pod - r.appendBackupVolumeMount(backupPVCName, &podSpec, &podSpec.Containers[0]) - - if err := addTolerations(&podSpec); err != nil { - return err - } - - // build job - backOffLimit := int32(3) - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: jobKey.Namespace, - Name: jobKey.Name, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: jobKey.Namespace, - Name: jobKey.Name, - }, - Spec: podSpec, - }, - BackoffLimit: &backOffLimit, - }, - } - if err := controllerutil.SetControllerReference(backup, job, r.Scheme); err != nil { - return err - } - reqCtx.Log.V(1).Info("create a job from delete backup files", "job", job) - return client.IgnoreAlreadyExists(r.Client.Create(reqCtx.Ctx, job)) -} - -func (r *BackupReconciler) createBackupToolJob( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - 
backupDestinationPath string) error { - - key := types.NamespacedName{Namespace: backup.Namespace, Name: backup.Name} - job := batchv1.Job{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, key, &job) - if err != nil { - return err - } - if exists { - // find resource object, skip created. - return nil - } - - toolPodSpec, err := r.buildBackupToolPodSpec(reqCtx, backup, backupPolicy, commonPolicy, backupDestinationPath) - if err != nil { - return err - } - - if err = r.createBatchV1Job(reqCtx, key, backup, toolPodSpec); err != nil { - return err - } - msg := fmt.Sprintf("Waiting for the job %s creation to complete.", key.Name) - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatingJob", msg) - return nil -} - -// ensureEmptyHooksCommand determines whether it has empty commands in the hooks -func (r *BackupReconciler) ensureEmptyHooksCommand( - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy, - preCommand bool) (bool, error) { - // return true directly, means hooks commands is empty, skip subsequent hook jobs. - if snapshotPolicy.Hooks == nil { - return true, nil - } - - commands := snapshotPolicy.Hooks.PostCommands - if preCommand { - commands = snapshotPolicy.Hooks.PreCommands - } - if len(commands) == 0 { - return true, nil - } - return false, nil -} - -func (r *BackupReconciler) createHooksCommandJob( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy, - key types.NamespacedName, - preCommand bool) error { - - job := batchv1.Job{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, key, &job) - if err != nil { - return err - } - if exists { - // find resource object, skip created. 
- return nil - } - - jobPodSpec, err := r.buildSnapshotPodSpec(reqCtx, backup, snapshotPolicy, preCommand) - if err != nil { - return err - } - - msg := fmt.Sprintf("Waiting for the job %s creation to complete.", key.Name) - r.Recorder.Event(backup, corev1.EventTypeNormal, "CreatingJob-"+key.Name, msg) - - return r.createBatchV1Job(reqCtx, key, backup, jobPodSpec) -} - -func (r *BackupReconciler) createBatchV1Job( - reqCtx intctrlutil.RequestCtx, - key types.NamespacedName, - backup *dataprotectionv1alpha1.Backup, - templatePodSpec corev1.PodSpec) error { - - backOffLimit := int32(3) - job := &batchv1.Job{ - // TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Namespace: key.Namespace, - Name: key.Name, - Labels: buildBackupWorkloadsLabels(backup), - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: key.Namespace, - Name: key.Name}, - Spec: templatePodSpec, - }, - BackoffLimit: &backOffLimit, - }, - } - controllerutil.AddFinalizer(job, dataProtectionFinalizerName) - if backup.Namespace == job.Namespace { - if err := controllerutil.SetControllerReference(backup, job, r.Scheme); err != nil { - return err - } - } - - reqCtx.Log.V(1).Info("create a built-in job from backup", "job", job) - return client.IgnoreAlreadyExists(r.Client.Create(reqCtx.Ctx, job)) -} - -func (r *BackupReconciler) deleteReferenceBatchV1Jobs(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - jobs := &batchv1.JobList{} - namespace := backup.Namespace - if backup.Spec.BackupType == dataprotectionv1alpha1.BackupTypeSnapshot { - namespace = viper.GetString(constant.CfgKeyCtrlrMgrNS) - } - if err := r.Client.List(reqCtx.Ctx, jobs, - client.InNamespace(namespace), - client.MatchingLabels(buildBackupWorkloadsLabels(backup))); err != nil { - return err - } - - for _, job := range jobs.Items { - if controllerutil.ContainsFinalizer(&job, dataProtectionFinalizerName) { - 
patch := client.MergeFrom(job.DeepCopy()) - controllerutil.RemoveFinalizer(&job, dataProtectionFinalizerName) - if err := r.Patch(reqCtx.Ctx, &job, patch); err != nil { - return err - } - } - - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &job); err != nil { - return err - } - } - return nil -} - -func (r *BackupReconciler) deleteReferenceVolumeSnapshot(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - snaps := &snapshotv1.VolumeSnapshotList{} - - if err := r.snapshotCli.List(snaps, - client.InNamespace(reqCtx.Req.Namespace), - client.MatchingLabels(buildBackupWorkloadsLabels(backup))); err != nil { - return err - } - for _, i := range snaps.Items { - if controllerutil.ContainsFinalizer(&i, dataProtectionFinalizerName) { - patch := i.DeepCopy() - controllerutil.RemoveFinalizer(&i, dataProtectionFinalizerName) - if err := r.snapshotCli.Patch(&i, patch); err != nil { - return err - } - } - if err := r.snapshotCli.Delete(&i); err != nil { - return err - } - } - return nil -} - -func (r *BackupReconciler) handleDeleteBackupFiles(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) (*batchv1.Job, error) { - if backup.Spec.BackupType == dataprotectionv1alpha1.BackupTypeSnapshot { - // no file to delete for this type - return nil, nil - } - if backup.Status.Phase == dataprotectionv1alpha1.BackupNew { - // nothing to delete - return nil, nil - } - jobKey := buildDeleteBackupFilesJobNamespacedName(backup) - job := &batchv1.Job{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, jobKey, job) - if err != nil { - return nil, err - } - // create job for deleting backup files - if !exists { - pvcName := backup.Status.PersistentVolumeClaimName - if pvcName == "" { - reqCtx.Log.Info("skip deleting backup files because PersistentVolumeClaimName is empty", - "backup", backup.Name) - return nil, nil - } - // check if pvc exists - if err = r.Client.Get(reqCtx.Ctx, types.NamespacedName{Namespace: 
backup.Namespace, Name: pvcName}, &corev1.PersistentVolumeClaim{}); err != nil { - if apierrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - - backupFilePath := "" - if backup.Status.Manifests != nil && backup.Status.Manifests.BackupTool != nil { - backupFilePath = backup.Status.Manifests.BackupTool.FilePath - } - if backupFilePath == "" || !strings.Contains(backupFilePath, backup.Name) { - // For compatibility: the FilePath field is changing from time to time, - // and it may not contain the backup name as a path component if the Backup object - // was created in a previous version. In this case, it's dangerous to execute - // the deletion command. For example, files belongs to other Backups can be deleted as well. - reqCtx.Log.Info("skip deleting backup files because backupFilePath is invalid", - "backupFilePath", backupFilePath, "backup", backup.Name) - return nil, nil - } - // the job will run in the background - return job, r.createDeleteBackupFileJob(reqCtx, jobKey, backup, pvcName, backupFilePath) - } - return job, nil -} - -// deleteReferenceStatefulSet deletes the referenced statefulSet. 
-func (r *BackupReconciler) deleteReferenceStatefulSet(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - sts := &appsv1.StatefulSet{} - exists, err := intctrlutil.CheckResourceExists(reqCtx.Ctx, r.Client, types.NamespacedName{ - Namespace: backup.Namespace, - Name: backup.Name, - }, sts) - if err != nil { - return err - } - if !exists && !model.IsOwnerOf(backup, sts) { - return nil - } - patch := client.MergeFrom(sts.DeepCopy()) - controllerutil.RemoveFinalizer(sts, dataProtectionFinalizerName) - if err = r.Client.Patch(reqCtx.Ctx, sts, patch); err != nil { - return err - } - return intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, sts) -} - -func (r *BackupReconciler) deleteExternalResources(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.Backup) error { - if err := r.deleteReferenceStatefulSet(reqCtx, backup); err != nil { - return err - } - if err := r.deleteReferenceBatchV1Jobs(reqCtx, backup); err != nil { - return err - } - if err := r.deleteReferenceVolumeSnapshot(reqCtx, backup); err != nil { - return err - } - return nil -} - -// getTargetPod gets the target pod by label selector. -// if the backup has obtained the target pod from label selector, it will be set to the annotations. -// then get the pod from this annotation to ensure that the same pod is picked up in future. 
-func (r *BackupReconciler) getTargetPod(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, labels map[string]string) (*corev1.Pod, error) { - reqCtx.Log.V(1).Info("Get pod from label", "label", labels) - targetPod := &corev1.PodList{} - if err := r.Client.List(reqCtx.Ctx, targetPod, - client.InNamespace(reqCtx.Req.Namespace), - client.MatchingLabels(labels)); err != nil { - return nil, err - } - if len(targetPod.Items) == 0 { - return nil, errors.New("can not find any pod to backup by labelsSelector") - } - sort.Sort(intctrlutil.ByPodName(targetPod.Items)) - targetPodName := backup.Annotations[dataProtectionBackupTargetPodKey] - for _, v := range targetPod.Items { - if targetPodName == v.Name { - return &v, nil - } - } - return &targetPod.Items[0], nil -} - -func (r *BackupReconciler) getTargetPVCs(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, podLabels map[string]string) ([]corev1.PersistentVolumeClaim, error) { - targetPod, err := r.getTargetPod(reqCtx, backup, podLabels) - if err != nil { - return nil, err - } - tempPVC := corev1.PersistentVolumeClaim{} - var dataPVC *corev1.PersistentVolumeClaim - var logPVC *corev1.PersistentVolumeClaim - for _, volume := range targetPod.Spec.Volumes { - if volume.PersistentVolumeClaim == nil { - continue - } - pvcKey := types.NamespacedName{Namespace: backup.Namespace, Name: volume.PersistentVolumeClaim.ClaimName} - if err = r.Client.Get(reqCtx.Ctx, pvcKey, &tempPVC); err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - switch tempPVC.Labels[constant.VolumeTypeLabelKey] { - case string(appsv1alpha1.VolumeTypeData): - dataPVC = tempPVC.DeepCopy() - case string(appsv1alpha1.VolumeTypeLog): - logPVC = tempPVC.DeepCopy() - } - } - - if dataPVC == nil { - return nil, errors.New("can not find any pvc to backup with labelsSelector") - } - - allPVCs := []corev1.PersistentVolumeClaim{*dataPVC} - if logPVC != nil { - allPVCs = append(allPVCs, *logPVC) - } - - return allPVCs, 
nil -} - -func (r *BackupReconciler) appendBackupVolumeMount( - pvcName string, - podSpec *corev1.PodSpec, - container *corev1.Container) { - // TODO(dsj): mount multi remote backup volumes - remoteVolumeName := fmt.Sprintf("backup-%s", pvcName) - remoteVolume := corev1.Volume{ - Name: remoteVolumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }, - } - remoteVolumeMount := corev1.VolumeMount{ - Name: remoteVolumeName, - MountPath: backupPathBase, - } - podSpec.Volumes = append(podSpec.Volumes, remoteVolume) - container.VolumeMounts = append(container.VolumeMounts, remoteVolumeMount) -} - -func (r *BackupReconciler) buildBackupToolPodSpec(reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - pathPrefix string) (corev1.PodSpec, error) { - podSpec := corev1.PodSpec{} - // get backup tool - backupTool, err := getBackupToolByName(reqCtx, r.Client, commonPolicy.BackupToolName) - if err != nil { - return podSpec, err - } - // TODO: check if pvc exists - clusterPod, err := r.getTargetPod(reqCtx, backup, commonPolicy.Target.LabelsSelector.MatchLabels) - if err != nil { - return podSpec, err - } - - // build pod dns string - container := corev1.Container{} - container.Name = backup.Name - container.Command = backupTool.Spec.BackupCommands - container.Image = backupTool.Spec.Image - container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) - if container.Image == "" { - // TODO(dsj): need determine container name to get, temporary use first container - container.Image = clusterPod.Spec.Containers[0].Image - } - if backupTool.Spec.Resources != nil { - container.Resources = *backupTool.Spec.Resources - } - container.VolumeMounts = clusterPod.Spec.Containers[0].VolumeMounts - - allowPrivilegeEscalation := false - runAsUser := 
int64(0) - container.SecurityContext = &corev1.SecurityContext{ - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - RunAsUser: &runAsUser} - - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - - envBackupName := corev1.EnvVar{ - Name: constant.DPBackupName, - Value: backup.Name, - } - - envBackupDir := corev1.EnvVar{ - Name: constant.DPBackupDIR, - Value: backupPathBase + pathPrefix, - } - - envDBHost := corev1.EnvVar{ - Name: constant.DPDBHost, - Value: intctrlutil.BuildPodHostDNS(clusterPod), - } - - envDPTargetPodName := corev1.EnvVar{ - Name: constant.DPTargetPodName, - Value: clusterPod.Name, - } - - container.Env = []corev1.EnvVar{envDPTargetPodName, envDBHost, envBackupName, envBackupDir} - if commonPolicy.Target.Secret != nil { - envDBUser := corev1.EnvVar{ - Name: constant.DPDBUser, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: commonPolicy.Target.Secret.Name, - }, - Key: commonPolicy.Target.Secret.UsernameKey, - }, - }, - } - - envDBPassword := corev1.EnvVar{ - Name: constant.DPDBPassword, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: commonPolicy.Target.Secret.Name, - }, - Key: commonPolicy.Target.Secret.PasswordKey, - }, - }, - } - - container.Env = append(container.Env, envDBUser, envDBPassword) - } - - if backupPolicy.Spec.Retention != nil && backupPolicy.Spec.Retention.TTL != nil { - ttl := backupPolicy.Spec.Retention.TTL - container.Env = append(container.Env, corev1.EnvVar{ - Name: constant.DPTTL, - Value: *ttl, - }) - // one more day than the configured TTL for logfile backup - logTTL := dataprotectionv1alpha1.AddTTL(ttl, 24) - container.Env = append(container.Env, corev1.EnvVar{ - Name: constant.DPLogfileTTL, - Value: logTTL, - }) - container.Env = append(container.Env, corev1.EnvVar{ - Name: constant.DPLogfileTTLSecond, - Value: 
strconv.FormatInt(int64(math.Floor(dataprotectionv1alpha1.ToDuration(&logTTL).Seconds())), 10), - }) - } - - // merge env from backup tool. - container.Env = append(container.Env, backupTool.Spec.Env...) - - podSpec.Containers = []corev1.Container{container} - podSpec.Volumes = clusterPod.Spec.Volumes - podSpec.RestartPolicy = corev1.RestartPolicyNever - - // mount the backup volume to the pod of backup tool - pvcName := backup.Status.PersistentVolumeClaimName - r.appendBackupVolumeMount(pvcName, &podSpec, &podSpec.Containers[0]) - - // the pod of job needs to be scheduled on the same node as the workload pod, because it needs to share one pvc - if clusterPod.Spec.NodeName != "" { - podSpec.NodeSelector = map[string]string{ - hostNameLabelKey: clusterPod.Spec.NodeName, - } - } - // ignore taints - podSpec.Tolerations = []corev1.Toleration{ - { - Operator: corev1.TolerationOpExists, - }, - } - return podSpec, nil -} - -func (r *BackupReconciler) buildSnapshotPodSpec( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - snapshotPolicy *dataprotectionv1alpha1.SnapshotPolicy, - preCommand bool) (corev1.PodSpec, error) { - podSpec := corev1.PodSpec{} - - clusterPod, err := r.getTargetPod(reqCtx, backup, snapshotPolicy.Target.LabelsSelector.MatchLabels) - if err != nil { - return podSpec, err - } - - container := corev1.Container{} - container.Name = backup.Name - container.Command = []string{"kubectl", "exec", "-n", backup.Namespace, - "-i", clusterPod.Name, "-c", snapshotPolicy.Hooks.ContainerName, "--", "sh", "-c"} - if preCommand { - container.Args = snapshotPolicy.Hooks.PreCommands - } else { - container.Args = snapshotPolicy.Hooks.PostCommands - } - container.Image = snapshotPolicy.Hooks.Image - if container.Image == "" { - container.Image = viper.GetString(constant.KBToolsImage) - container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) - } - allowPrivilegeEscalation := false - runAsUser := int64(0) - 
container.SecurityContext = &corev1.SecurityContext{ - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - RunAsUser: &runAsUser} - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - podSpec.Containers = []corev1.Container{container} - podSpec.RestartPolicy = corev1.RestartPolicyNever - podSpec.ServiceAccountName = viper.GetString("KUBEBLOCKS_SERVICEACCOUNT_NAME") - - if err = addTolerations(&podSpec); err != nil { - return podSpec, err - } - - return podSpec, nil -} - -func (r *BackupReconciler) buildMetadataCollectionPodSpec( - reqCtx intctrlutil.RequestCtx, - backup *dataprotectionv1alpha1.Backup, - commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy, - basePolicy *dataprotectionv1alpha1.BasePolicy, - backupDestinationPath string, - updateInfo dataprotectionv1alpha1.BackupStatusUpdate) (corev1.PodSpec, error) { - podSpec := corev1.PodSpec{} - targetPod, err := r.getTargetPod(reqCtx, backup, basePolicy.Target.LabelsSelector.MatchLabels) - if err != nil { - return podSpec, err - } - - container := corev1.Container{} - container.Name = backup.Name - container.Command = []string{"sh", "-c"} - var args string - if strings.TrimSpace(updateInfo.Script) == "" && commonPolicy != nil { - // if not specified script, patch backup status with the json string from ${BACKUP_DIR}/backup.info. 
- args = "set -o errexit; set -o nounset;" + - "backupInfo=$(cat ${BACKUP_INFO_FILE});echo \"backupInfo:${backupInfo}\";" + - "eval kubectl -n %s patch backup %s --subresource=status --type=merge --patch '{\\\"status\\\":${backupInfo}}';" - args = fmt.Sprintf(args, backup.Namespace, backup.Name) - container.Env = []corev1.EnvVar{ - {Name: "BACKUP_INFO_FILE", Value: buildBackupInfoENV(backupDestinationPath)}, - } - r.appendBackupVolumeMount(backup.Status.PersistentVolumeClaimName, &podSpec, &container) - } else { - args = "set -o errexit; set -o nounset;" + - "OUTPUT=$(kubectl -n %s exec -it pod/%s -c %s -- %s);" + - "kubectl -n %s patch backup %s --subresource=status --type=merge --patch \"%s\";" - statusPath := "status." + updateInfo.Path - if updateInfo.Path == "" { - statusPath = "status" - } - patchJSON := generateJSON(statusPath, "$OUTPUT") - args = fmt.Sprintf(args, targetPod.Namespace, targetPod.Name, updateInfo.ContainerName, - updateInfo.Script, backup.Namespace, backup.Name, patchJSON) - } - if updateInfo.UseTargetPodServiceAccount { - podSpec.ServiceAccountName = targetPod.Spec.ServiceAccountName - } else { - podSpec.ServiceAccountName = viper.GetString("KUBEBLOCKS_SERVICEACCOUNT_NAME") - } - intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) - container.Args = []string{args} - container.Image = viper.GetString(constant.KBToolsImage) - container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) - podSpec.Containers = []corev1.Container{container} - podSpec.RestartPolicy = corev1.RestartPolicyNever - if err = addTolerations(&podSpec); err != nil { - return podSpec, err - } - return podSpec, nil + return r.deleteExternalJobs(reqCtx, backup) } // getClusterObjectString gets the cluster object and convert it to string. 
-func (r *BackupReconciler) getClusterObjectString(cluster *appsv1alpha1.Cluster) (*string, error) { +func getClusterObjectString(cluster *appsv1alpha1.Cluster) (*string, error) { // maintain only the cluster's spec and name/namespace. newCluster := &appsv1alpha1.Cluster{ Spec: cluster.Spec, @@ -2015,8 +640,8 @@ func (r *BackupReconciler) getClusterObjectString(cluster *appsv1alpha1.Cluster) } // setClusterSnapshotAnnotation sets the snapshot of cluster to the backup's annotations. -func (r *BackupReconciler) setClusterSnapshotAnnotation(backup *dataprotectionv1alpha1.Backup, cluster *appsv1alpha1.Cluster) error { - clusterString, err := r.getClusterObjectString(cluster) +func setClusterSnapshotAnnotation(backup *dpv1alpha1.Backup, cluster *appsv1alpha1.Cluster) error { + clusterString, err := getClusterObjectString(cluster) if err != nil { return err } diff --git a/controllers/dataprotection/backup_controller_test.go b/controllers/dataprotection/backup_controller_test.go index 1e069cdd350..9604e353709 100644 --- a/controllers/dataprotection/backup_controller_test.go +++ b/controllers/dataprotection/backup_controller_test.go @@ -20,17 +20,14 @@ along with this program. If not, see . package dataprotection import ( - "fmt" "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/ghodss/yaml" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,25 +36,16 @@ import ( dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dpbackup "github.com/apecloud/kubeblocks/internal/dataprotection/backup" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" viper "github.com/apecloud/kubeblocks/internal/viperx" ) var _ = Describe("Backup Controller test", func() { - const clusterName = "wesql-cluster" - const componentName = "replicasets-primary" - const containerName = "mysql" - const backupPolicyName = "test-backup-policy" - const backupRemotePVCName = "backup-remote-pvc" - const defaultSchedule = "0 3 * * *" - const defaultTTL = "7d" - const backupName = "test-backup-job" - const storageClassName = "test-storage-class" - - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, testCtx.DefaultNamespace) - cleanEnv := func() { // must wait till resources deleted and no longer existed before the testcases start, // otherwise if later it needs to create some new resource objects with the same name, @@ -68,181 +56,143 @@ var _ = Describe("Backup Controller test", func() { // delete rest 
mocked objects inNS := client.InNamespace(testCtx.DefaultNamespace) ml := client.HasLabels{testCtx.TestObjLabelKey} - testapps.ClearResources(&testCtx, generics.BackupToolSignature, ml) + // namespaced testapps.ClearResources(&testCtx, generics.ClusterSignature, inNS, ml) testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.BackupPolicySignature, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + + // wait all backup to be deleted, otherwise the controller maybe create + // job to delete the backup between the ClearResources function delete + // the job and get the job list, resulting the ClearResources panic. + Eventually(testapps.List(&testCtx, generics.BackupSignature, inNS)).Should(HaveLen(0)) + + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupPolicySignature, true, inNS) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS) - testapps.ClearResources(&testCtx, generics.CronJobSignature, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS) + // non-namespaced - testapps.ClearResources(&testCtx, generics.BackupToolSignature, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ActionSetSignature, true, ml) testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupRepoSignature, true, ml) testapps.ClearResources(&testCtx, generics.StorageProviderSignature, ml) } - var nodeName string - var pvcName string - var cluster *appsv1alpha1.Cluster + + var clusterInfo *testdp.BackupClusterInfo BeforeEach(func() { cleanEnv() - viper.Set(constant.CfgKeyCtrlrMgrNS, testCtx.DefaultNamespace) - By("mock a cluster") - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - "test-cd", 
"test-cv").Create(&testCtx).GetObject() - podGenerateName := clusterName + "-" + componentName - By("By mocking a storage class") - _ = testapps.CreateStorageClass(&testCtx, storageClassName, true) - - By("By mocking a pvc belonging to the pod") - pvc := testapps.NewPersistentVolumeClaimFactory( - testCtx.DefaultNamespace, "data-"+podGenerateName+"-0", clusterName, componentName, "data"). - SetStorage("1Gi"). - SetStorageClass(storageClassName). - Create(&testCtx).GetObject() - pvcName = pvc.Name - - By("By mocking a pvc belonging to the pod2") - pvc2 := testapps.NewPersistentVolumeClaimFactory( - testCtx.DefaultNamespace, "data-"+podGenerateName+"-1", clusterName, componentName, "data"). - SetStorage("1Gi"). - SetStorageClass(storageClassName). - Create(&testCtx).GetObject() - - By("By mocking a pod belonging to the statefulset") - volume := corev1.Volume{Name: pvc.Name, VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name}}} - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, podGenerateName+"-0"). - AddAppInstanceLabel(clusterName). - AddRoleLabel("leader"). - AddAppComponentLabel(componentName). - AddContainer(corev1.Container{Name: containerName, Image: testapps.ApeCloudMySQLImage}). - AddVolume(volume). - Create(&testCtx).GetObject() - nodeName = pod.Spec.NodeName - - By("By mocking a pod 2 belonging to the statefulset") - volume2 := corev1.Volume{Name: pvc2.Name, VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc2.Name}}} - _ = testapps.NewPodFactory(testCtx.DefaultNamespace, podGenerateName+"-1"). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(componentName). - AddContainer(corev1.Container{Name: containerName, Image: testapps.ApeCloudMySQLImage}). - AddVolume(volume2). 
- Create(&testCtx).GetObject() + clusterInfo = testdp.NewFakeCluster(&testCtx) }) AfterEach(func() { cleanEnv() - viper.Set(constant.CfgKeyCtrlrMgrNS, testCtx.DefaultNamespace) }) When("with default settings", func() { var ( - backupTool *dpv1alpha1.BackupTool backupPolicy *dpv1alpha1.BackupPolicy + repoPVCName string + cluster *appsv1alpha1.Cluster + pvcName string + targetPod *corev1.Pod ) + BeforeEach(func() { - By("By creating a backupTool") - backupTool = testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) - - By("By creating a backupPolicy from backupTool: " + backupTool.Name) - backupPolicy = testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - SetTTL(defaultTTL). - AddSnapshotPolicy(). - SetSchedule(defaultSchedule, true). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - AddMatchLabels(constant.RoleLabelKey, "leader"). - SetTargetSecretName(clusterName). - AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). - AddDataFilePolicy(). - SetBackupStatusUpdates([]dpv1alpha1.BackupStatusUpdate{ - { - UpdateStage: dpv1alpha1.POST, - }, - }). - SetBackupToolName(backupTool.Name). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - AddMatchLabels(constant.RoleLabelKey, "leader"). - SetTargetSecretName(clusterName). - SetPVC(backupRemotePVCName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - AddLogfilePolicy(). - SetSchedule(defaultSchedule, true). - SetPVC(backupRemotePVCName). - SetBackupToolName(backupTool.Name). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). 
- Create(&testCtx).GetObject() + By("creating an actionSet") + actionSet := testdp.NewFakeActionSet(&testCtx) + + By("creating storage provider") + _ = testdp.NewFakeStorageProvider(&testCtx, nil) + + By("creating backup repo") + _, repoPVCName = testdp.NewFakeBackupRepo(&testCtx, nil) + + By("creating a backupPolicy from actionSet: " + actionSet.Name) + backupPolicy = testdp.NewFakeBackupPolicy(&testCtx, nil) + + cluster = clusterInfo.Cluster + pvcName = clusterInfo.TargetPVC + targetPod = clusterInfo.TargetPod }) - Context("creates a datafile backup", func() { - var backupKey types.NamespacedName - BeforeEach(func() { - // set datafile backup relies on logfile - Expect(testapps.ChangeObj(&testCtx, backupTool, func(tmpObj *dpv1alpha1.BackupTool) { - tmpObj.Spec.Physical.RelyOnLogfile = true - })).Should(Succeed()) + Context("creates a backup", func() { + var ( + backupKey types.NamespacedName + backup *dpv1alpha1.Backup + ) + + getJobKey := func() client.ObjectKey { + return client.ObjectKey{ + Name: dpbackup.GenerateBackupJobName(backup, dpbackup.BackupDataJobNamePrefix), + Namespace: backup.Namespace, + } + } - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). 
- Create(&testCtx).GetObject() + BeforeEach(func() { + By("creating a backup from backupPolicy " + testdp.BackupPolicyName) + backup = testdp.NewFakeBackup(&testCtx, nil) backupKey = client.ObjectKeyFromObject(backup) }) It("should succeed after job completes", func() { By("check backup status") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.LogFilePersistentVolumeClaimName).Should(Equal(backupRemotePVCName)) - g.Expect(fetched.Status.Manifests.BackupTool.LogFilePath).Should(ContainSubstring(getCreatedCRNameByBackupPolicy(backupPolicy, dpv1alpha1.BackupTypeLogFile))) + g.Expect(fetched.Status.PersistentVolumeClaimName).Should(Equal(repoPVCName)) + g.Expect(fetched.Status.Path).Should(Equal(dpbackup.BuildBackupPath(fetched, backupPolicy.Spec.PathPrefix))) + g.Expect(fetched.Status.Phase).Should(Equal(dpv1alpha1.BackupPhaseRunning)) })).Should(Succeed()) - By("Check backup job's nodeName equals pod's nodeName") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *batchv1.Job) { - g.Expect(fetched.Spec.Template.Spec.NodeSelector[hostNameLabelKey]).To(Equal(nodeName)) + By("check backup job's nodeName equals pod's nodeName") + Eventually(testapps.CheckObj(&testCtx, getJobKey(), func(g Gomega, fetched *batchv1.Job) { + g.Expect(fetched.Spec.Template.Spec.NodeSelector[corev1.LabelHostname]).To(Equal(targetPod.Spec.NodeName)) })).Should(Succeed()) - patchK8sJobStatus(backupKey, batchv1.JobComplete) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(), batchv1.JobComplete) + + By("backup job should have completed") + Eventually(testapps.CheckObj(&testCtx, getJobKey(), func(g Gomega, fetched *batchv1.Job) { + _, finishedType, _ := dputils.IsJobFinished(fetched) + g.Expect(finishedType).To(Equal(batchv1.JobComplete)) + })).Should(Succeed()) - By("Check backup job completed") + By("backup should have completed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched 
*dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - g.Expect(fetched.Status.SourceCluster).Should(Equal(clusterName)) - g.Expect(fetched.Labels[constant.DataProtectionLabelClusterUIDKey]).Should(Equal(string(cluster.UID))) - g.Expect(fetched.Labels[constant.AppInstanceLabelKey]).Should(Equal(clusterName)) - g.Expect(fetched.Labels[constant.KBAppComponentLabelKey]).Should(Equal(componentName)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) + g.Expect(fetched.Labels[dptypes.DataProtectionLabelClusterUIDKey]).Should(Equal(string(cluster.UID))) + g.Expect(fetched.Labels[constant.AppInstanceLabelKey]).Should(Equal(testdp.ClusterName)) + g.Expect(fetched.Labels[constant.KBAppComponentLabelKey]).Should(Equal(testdp.ComponentName)) g.Expect(fetched.Annotations[constant.ClusterSnapshotAnnotationKey]).ShouldNot(BeEmpty()) })).Should(Succeed()) - By("Check backup job is deleted after completed") - Eventually(testapps.CheckObjExists(&testCtx, backupKey, &batchv1.Job{}, false)).Should(Succeed()) + By("backup job should be deleted after backup completed") + Eventually(testapps.CheckObjExists(&testCtx, getJobKey(), &batchv1.Job{}, false)).Should(Succeed()) }) It("should fail after job fails", func() { - patchK8sJobStatus(backupKey, batchv1.JobFailed) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(), batchv1.JobFailed) + + By("check backup job failed") + Eventually(testapps.CheckObj(&testCtx, getJobKey(), func(g Gomega, fetched *batchv1.Job) { + _, finishedType, _ := dputils.IsJobFinished(fetched) + g.Expect(finishedType).To(Equal(batchv1.JobFailed)) + })).Should(Succeed()) - By("Check backup job failed") + By("check backup failed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) })).Should(Succeed()) }) }) - Context("deletes 
a datafile backup", func() { - var backupKey types.NamespacedName - var backup *dpv1alpha1.Backup + Context("deletes a backup", func() { + var ( + backupKey types.NamespacedName + backup *dpv1alpha1.Backup + ) BeforeEach(func() { - By("creating a backup from backupPolicy: " + backupPolicyName) - backup = testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). - Create(&testCtx).GetObject() + By("creating a backup from backupPolicy " + testdp.BackupPolicyName) + backup = testdp.NewFakeBackup(&testCtx, nil) backupKey = client.ObjectKeyFromObject(backup) By("waiting for finalizers to be added") @@ -250,15 +200,11 @@ var _ = Describe("Backup Controller test", func() { g.Expect(backup.GetFinalizers()).ToNot(BeEmpty()) })).Should(Succeed()) - By("setting backup file path") + By("setting backup status") Eventually(testapps.ChangeObjStatus(&testCtx, backup, func() { - if backup.Status.Manifests == nil { - backup.Status.Manifests = &dpv1alpha1.ManifestsStatus{} - } - if backup.Status.Manifests.BackupTool == nil { - backup.Status.Manifests.BackupTool = &dpv1alpha1.BackupToolManifestsStatus{} + if backup.Status.PersistentVolumeClaimName == "" { + backup.Status.PersistentVolumeClaimName = repoPVCName } - backup.Status.Manifests.BackupTool.FilePath = "/" + backupName backup.Status.StartTimestamp = &metav1.Time{Time: time.Now()} })).Should(Succeed()) }) @@ -268,51 +214,47 @@ var _ = Describe("Backup Controller test", func() { testapps.DeleteObject(&testCtx, backupKey, &dpv1alpha1.Backup{}) By("checking new created Job") - jobKey := buildDeleteBackupFilesJobNamespacedName(backup) + jobKey := dpbackup.BuildDeleteBackupFilesJobKey(backup) job := &batchv1.Job{} Eventually(testapps.CheckObjExists(&testCtx, jobKey, job, true)).Should(Succeed()) - volumeName := "backup-" + backupRemotePVCName + volumeName := dpbackup.GenerateBackupRepoVolumeName(repoPVCName) 
Eventually(testapps.CheckObj(&testCtx, jobKey, func(g Gomega, job *batchv1.Job) { Expect(job.Spec.Template.Spec.Volumes). Should(ContainElement(corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: backupRemotePVCName, + ClaimName: repoPVCName, }, }, })) Expect(job.Spec.Template.Spec.Containers[0].VolumeMounts). Should(ContainElement(corev1.VolumeMount{ Name: volumeName, - MountPath: backupPathBase, + MountPath: dpbackup.RepoVolumeMountPath, })) })).Should(Succeed()) - By("checking Backup object, it should not be deleted") + By("checking backup object, it should not be deleted") Eventually(testapps.CheckObjExists(&testCtx, backupKey, &dpv1alpha1.Backup{}, true)).Should(Succeed()) - By("mock job for deletion to Failed, backup should not be deleted") - Expect(testapps.ChangeObjStatus(&testCtx, job, func() { - job.Status.Conditions = []batchv1.JobCondition{ - { - Type: batchv1.JobFailed, - }, - } - })).Should(Succeed()) + By("mock job for deletion to failed, backup should not be deleted") + testdp.ReplaceK8sJobStatus(&testCtx, jobKey, batchv1.JobFailed) Eventually(testapps.CheckObjExists(&testCtx, backupKey, &dpv1alpha1.Backup{}, true)).Should(Succeed()) By("mock job for deletion to completed, backup should be deleted") - Expect(testapps.ChangeObjStatus(&testCtx, job, func() { - job.Status.Conditions = []batchv1.JobCondition{ - { - Type: batchv1.JobComplete, - }, - } + testdp.ReplaceK8sJobStatus(&testCtx, jobKey, batchv1.JobComplete) + + By("check deletion backup file job completed") + Eventually(testapps.CheckObj(&testCtx, jobKey, func(g Gomega, fetched *batchv1.Job) { + _, finishedType, _ := dputils.IsJobFinished(fetched) + g.Expect(finishedType).To(Equal(batchv1.JobComplete)) })).Should(Succeed()) + + By("check backup deleted") Eventually(testapps.CheckObjExists(&testCtx, backupKey, &dpv1alpha1.Backup{}, false)).Should(Succeed()) @@ -321,100 +263,53 @@ var _ = 
Describe("Backup Controller test", func() { }) Context("creates a snapshot backup", func() { - var backupKey types.NamespacedName - var backup *dpv1alpha1.Backup + var ( + backupKey types.NamespacedName + backup *dpv1alpha1.Backup + vsKey client.ObjectKey + ) BeforeEach(func() { viper.Set("VOLUMESNAPSHOT", "true") - viper.Set(constant.CfgKeyCtrlrMgrNS, "default") - viper.Set(constant.CfgKeyCtrlrMgrAffinity, - "{\"nodeAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"preference\":{\"matchExpressions\":[{\"key\":\"kb-controller\",\"operator\":\"In\",\"values\":[\"true\"]}]},\"weight\":100}]}}") - viper.Set(constant.CfgKeyCtrlrMgrTolerations, - "[{\"key\":\"key1\", \"operator\": \"Exists\", \"effect\": \"NoSchedule\"}]") - viper.Set(constant.CfgKeyCtrlrMgrNodeSelector, "{\"beta.kubernetes.io/arch\":\"amd64\"}") - snapshotBackupName := "backup-default-postgres-cluster-20230628104804" - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup = testapps.NewBackupFactory(testCtx.DefaultNamespace, snapshotBackupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeSnapshot). 
- Create(&testCtx).GetObject() + By("create a backup from backupPolicy " + testdp.BackupPolicyName) + backup = testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Spec.BackupMethod = testdp.VSBackupMethodName + }) backupKey = client.ObjectKeyFromObject(backup) + vsKey = client.ObjectKey{ + Name: dputils.GetBackupVolumeSnapshotName(backup.Name, "data"), + Namespace: backup.Namespace, + } }) AfterEach(func() { viper.Set("VOLUMESNAPSHOT", "false") - viper.Set(constant.CfgKeyCtrlrMgrAffinity, "") - viper.Set(constant.CfgKeyCtrlrMgrTolerations, "") - viper.Set(constant.CfgKeyCtrlrMgrNodeSelector, "") }) - It("should success after all jobs complete", func() { - backupPolicyKey := types.NamespacedName{Name: backupPolicyName, Namespace: backupKey.Namespace} - patchBackupPolicySpecBackupStatusUpdates(backupPolicyKey) - - preJobKey := types.NamespacedName{Name: generateUniqueJobName(backup, "hook-pre"), Namespace: backupKey.Namespace} - postJobKey := types.NamespacedName{Name: generateUniqueJobName(backup, "hook-post"), Namespace: backupKey.Namespace} - patchK8sJobStatus(preJobKey, batchv1.JobComplete) - By("Check job tolerations") - Eventually(testapps.CheckObj(&testCtx, preJobKey, func(g Gomega, fetched *batchv1.Job) { - g.Expect(fetched.Spec.Template.Spec.Tolerations).ShouldNot(BeEmpty()) - g.Expect(fetched.Spec.Template.Spec.NodeSelector).ShouldNot(BeEmpty()) - g.Expect(fetched.Spec.Template.Spec.Affinity).ShouldNot(BeNil()) - g.Expect(fetched.Spec.Template.Spec.Affinity.NodeAffinity).ShouldNot(BeNil()) - })).Should(Succeed()) - - patchVolumeSnapshotStatus(backupKey, true) - patchK8sJobStatus(postJobKey, batchv1.JobComplete) - - logJobKey := types.NamespacedName{Name: generateUniqueJobName(backup, "status-0-pre"), Namespace: backupKey.Namespace} - patchK8sJobStatus(logJobKey, batchv1.JobComplete) + It("should success after all volume snapshot ready", func() { + By("patching volumesnapshot status to ready") + 
testdp.PatchVolumeSnapshotStatus(&testCtx, vsKey, true) - By("Check backup job completed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - - sizeJobKey := types.NamespacedName{Name: generateUniqueJobName(backup, "status-1-post"), Namespace: backupKey.Namespace} - patchK8sJobStatus(sizeJobKey, batchv1.JobComplete) - - By("Check pre job cleaned") - Eventually(testapps.CheckObjExists(&testCtx, preJobKey, &batchv1.Job{}, false)).Should(Succeed()) - By("Check post job cleaned") - Eventually(testapps.CheckObjExists(&testCtx, postJobKey, &batchv1.Job{}, false)).Should(Succeed()) - By("Check if the target pod name is correct") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *snapshotv1.VolumeSnapshot) { + By("checking volume snapshot source is equal to pvc") + Eventually(testapps.CheckObj(&testCtx, vsKey, func(g Gomega, fetched *vsv1.VolumeSnapshot) { g.Expect(*fetched.Spec.Source.PersistentVolumeClaimName).To(Equal(pvcName)) })).Should(Succeed()) }) - It("should fail after pre-job fails", func() { - patchK8sJobStatus(types.NamespacedName{Name: generateUniqueJobName(backup, "hook-pre"), Namespace: backupKey.Namespace}, batchv1.JobFailed) - - By("Check backup job failed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) - })).Should(Succeed()) - }) - It("should fail if volumesnapshot reports error", func() { - - By("patching job status to pass check") - preJobKey := types.NamespacedName{Name: generateUniqueJobName(backup, "hook-pre"), Namespace: backupKey.Namespace} - patchK8sJobStatus(preJobKey, batchv1.JobComplete) - By("patching volumesnapshot status with error") - Eventually(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(tmpVS *snapshotv1.VolumeSnapshot) { + 
Eventually(testapps.GetAndChangeObjStatus(&testCtx, vsKey, func(tmpVS *vsv1.VolumeSnapshot) { msg := "Failed to set default snapshot class with error: some error" - vsError := snapshotv1.VolumeSnapshotError{ + vsError := vsv1.VolumeSnapshotError{ Message: &msg, } - snapStatus := snapshotv1.VolumeSnapshotStatus{Error: &vsError} + snapStatus := vsv1.VolumeSnapshotStatus{Error: &vsError} tmpVS.Status = &snapStatus })).Should(Succeed()) By("checking backup failed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) })).Should(Succeed()) }) }) @@ -428,374 +323,77 @@ var _ = Describe("Backup Controller test", func() { // delete rest mocked objects inNS := client.InNamespace(testCtx.DefaultNamespace) ml := client.HasLabels{testCtx.TestObjLabelKey} - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, + generics.PersistentVolumeClaimSignature, true, inNS, ml) }) It("should fail when disable volumesnapshot", func() { viper.Set("VOLUMESNAPSHOT", "false") - - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeSnapshot). 
- Create(&testCtx).GetObject() + By("creating a backup from backupPolicy " + testdp.BackupPolicyName) + backup := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Spec.BackupMethod = testdp.VSBackupMethodName + }) backupKey = client.ObjectKeyFromObject(backup) - By("Check backup job failed") + By("check backup failed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) })).Should(Succeed()) }) It("should fail without pvc", func() { - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeSnapshot). - Create(&testCtx).GetObject() - backupKey = client.ObjectKeyFromObject(backup) - - patchK8sJobStatus(types.NamespacedName{Name: generateUniqueJobName(backup, "hook-pre"), Namespace: backupKey.Namespace}, batchv1.JobComplete) - - By("Check backup job failed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) - })).Should(Succeed()) - }) - - }) - - Context("creates a logfile backup", func() { - var backupKey types.NamespacedName - - BeforeEach(func() { - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - WithRandomName(). - AddLogfilePolicy(). - SetTTL("7d"). - SetSchedule("*/1 * * * *", true). - SetBackupToolName(backupTool.Name). - SetPVC(backupRemotePVCName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). 
- Create(&testCtx).GetObject() - By("By creating a backup from backupPolicy: " + backupPolicy.Name) - logFileBackupName := getCreatedCRNameByBackupPolicy(backupPolicy, dpv1alpha1.BackupTypeLogFile) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, logFileBackupName). - SetBackupPolicyName(backupPolicy.Name). - SetBackupType(dpv1alpha1.BackupTypeLogFile). - Create(&testCtx).GetObject() + By("creating a backup from backupPolicy " + testdp.BackupPolicyName) + backup := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Spec.BackupMethod = testdp.VSBackupMethodName + }) backupKey = client.ObjectKeyFromObject(backup) - }) - - It("should succeed", func() { - By("Check backup job's nodeName equals pod's nodeName") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *batchv1.Job) { - g.Expect(fetched.Spec.Template.Spec.NodeSelector[hostNameLabelKey]).To(Equal(nodeName)) - })).Should(Succeed()) - - patchK8sJobStatus(backupKey, batchv1.JobComplete) - - By("Check backup job completed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - g.Expect(fetched.Status.SourceCluster).Should(Equal(clusterName)) - g.Expect(fetched.Labels[constant.DataProtectionLabelClusterUIDKey]).Should(Equal(string(cluster.UID))) - g.Expect(fetched.Labels[constant.AppInstanceLabelKey]).Should(Equal(clusterName)) - g.Expect(fetched.Labels[constant.KBAppComponentLabelKey]).Should(Equal(componentName)) - g.Expect(fetched.Annotations[constant.ClusterSnapshotAnnotationKey]).ShouldNot(BeEmpty()) - })).Should(Succeed()) - - By("Check backup job is deleted after completed") - Eventually(testapps.CheckObjExists(&testCtx, backupKey, &batchv1.Job{}, false)).Should(Succeed()) - }) - - It("should succeed if the previous job failed and the current job succeeded", func() { - patchK8sJobStatus(backupKey, batchv1.JobFailed) - - By("Check 
backup job failed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) - })).Should(Succeed()) - By("Patch backup Phase to New") - Eventually(testapps.GetAndChangeObjStatus(&testCtx, backupKey, func(fetched *dpv1alpha1.Backup) { - fetched.Status.Phase = dpv1alpha1.BackupNew - })).Should(Succeed()) - - By("Check backup job completed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupInProgress)) - })).Should(Succeed()) - patchK8sJobStatus(backupKey, batchv1.JobComplete) + By("check backup failed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) })).Should(Succeed()) }) }) }) - When("with backupTool resources", func() { - Context("creates a datafile backup", func() { - var backupKey types.NamespacedName - var backupPolicy *dpv1alpha1.BackupPolicy - var pathPrefix = "/mysql/backup" - createBackup := func(backupName string) { - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). 
- Create(&testCtx).GetObject() - backupKey = client.ObjectKeyFromObject(backup) - } - - BeforeEach(func() { - viper.SetDefault(constant.CfgKeyBackupPVCStorageClass, "") - By("By creating a backupTool") - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName(), - func(backupTool *dpv1alpha1.BackupTool) { - backupTool.Spec.Resources = nil - }) - - By("By creating a backupPolicy from backupTool: " + backupTool.Name) - backupPolicy = testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddAnnotations(constant.BackupDataPathPrefixAnnotationKey, pathPrefix). - AddDataFilePolicy(). - SetBackupToolName(backupTool.Name). - SetSchedule(defaultSchedule, true). - SetTTL(defaultTTL). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetTargetSecretName(clusterName). - SetPVC(backupRemotePVCName). - Create(&testCtx).GetObject() - - }) - - It("should succeed after job completes", func() { - createBackup(backupName) - patchK8sJobStatus(backupKey, batchv1.JobComplete) - By("Check backup job completed") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - g.Expect(fetched.Status.Manifests.BackupTool.FilePath).To(Equal(fmt.Sprintf("/%s%s/%s", backupKey.Namespace, pathPrefix, backupKey.Name))) - })).Should(Succeed()) - }) - - It("creates pvc if the specified pvc not exists", func() { - createBackup(backupName) - By("Check pvc created by backup controller") - Eventually(testapps.CheckObjExists(&testCtx, types.NamespacedName{ - Name: backupRemotePVCName, - Namespace: testCtx.DefaultNamespace, - }, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) - }) - - It("creates pvc if the specified pvc not exists", func() { - By("set persistentVolumeConfigmap") - configMapName := "pv-template-configmap" - Expect(testapps.ChangeObj(&testCtx, backupPolicy, 
func(tmpObj *dpv1alpha1.BackupPolicy) { - tmpObj.Spec.Datafile.PersistentVolumeClaim.PersistentVolumeConfigMap = &dpv1alpha1.PersistentVolumeConfigMap{ - Name: configMapName, - Namespace: testCtx.DefaultNamespace, - } - })).Should(Succeed()) - - By("create backup with non existent configmap of pv template") - createBackup(backupName) - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) - g.Expect(fetched.Status.FailureReason).To(ContainSubstring(fmt.Sprintf(`ConfigMap "%s" not found`, configMapName))) - })).Should(Succeed()) - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: testCtx.DefaultNamespace, - }, - Data: map[string]string{}, - } - Expect(testCtx.CreateObj(ctx, configMap)).Should(Succeed()) - - By("create backup with the configmap not contains the key 'persistentVolume'") - createBackup(backupName + "1") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) - g.Expect(fetched.Status.FailureReason).To(ContainSubstring("the persistentVolume template is empty in the configMap")) - })).Should(Succeed()) - - By("create backup with the configmap contains the key 'persistentVolume'") - Expect(testapps.ChangeObj(&testCtx, configMap, func(tmpObj *corev1.ConfigMap) { - pv := corev1.PersistentVolume{ - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteMany, - }, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - CSI: &corev1.CSIPersistentVolumeSource{ - Driver: "kubeblocks.com", - FSType: "ext4", - VolumeHandle: pvcName, - }, - }, - }, - } - pvString, _ := yaml.Marshal(pv) - 
tmpObj.Data = map[string]string{ - "persistentVolume": string(pvString), - } - })).Should(Succeed()) - createBackup(backupName + "2") - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupInProgress)) - })).Should(Succeed()) - - By("check pvc and pv created by backup controller") - Eventually(testapps.CheckObjExists(&testCtx, types.NamespacedName{ - Name: backupRemotePVCName, - Namespace: testCtx.DefaultNamespace, - }, &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) - Eventually(testapps.CheckObjExists(&testCtx, types.NamespacedName{ - Name: backupRemotePVCName + "-" + testCtx.DefaultNamespace, - Namespace: testCtx.DefaultNamespace, - }, &corev1.PersistentVolume{}, true)).Should(Succeed()) - - }) - }) - }) When("with exceptional settings", func() { - Context("creates a backup with non existent backup policy", func() { + Context("creates a backup with non-existent backup policy", func() { var backupKey types.NamespacedName BeforeEach(func() { - By("By creating a backup from backupPolicy: " + backupPolicyName) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). 
- Create(&testCtx).GetObject() + By("creating a backup from backupPolicy " + testdp.BackupPolicyName) + backup := testdp.NewFakeBackup(&testCtx, nil) backupKey = client.ObjectKeyFromObject(backup) }) - It("Should fail", func() { - By("Check backup status failed") + It("should fail", func() { + By("check backup status failed") Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupFailed)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) })).Should(Succeed()) }) }) }) - When("with logfile backup", func() { - Context("test logfile backup", func() { - It("testing the legality of logfile backup ", func() { - By("init test resources") - // mock a backupTool - backupTool := createStatefulKindBackupTool() - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddLogfilePolicy(). - SetTTL("7d"). - SetSchedule("*/1 * * * *", false). - SetBackupToolName(backupTool.Name). - SetPVC(backupRemotePVCName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - Create(&testCtx).GetObject() - By("create logfile backup with a invalid name, expect error") - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, "test-logfile"). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeLogFile). 
- Create(&testCtx).GetObject() - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { - g.Expect(backup.Status.Phase).Should(Equal(dpv1alpha1.BackupFailed)) - expectErr := intctrlutil.NewInvalidLogfileBackupName(backupPolicyName) - g.Expect(backup.Status.FailureReason).Should(Equal(expectErr.Error())) - })).Should(Succeed()) - By("update logfile backup with valid name, but the schedule is disabled, expect error") - backup = testapps.NewBackupFactory(testCtx.DefaultNamespace, getCreatedCRNameByBackupPolicy(backupPolicy, dpv1alpha1.BackupTypeLogFile)). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeLogFile). - Create(&testCtx).GetObject() - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { - g.Expect(backup.Status.Phase).Should(Equal(dpv1alpha1.BackupFailed)) - expectErr := intctrlutil.NewBackupScheduleDisabled(string(dpv1alpha1.BackupTypeLogFile), backupPolicyName) - g.Expect(backup.Status.FailureReason).Should(Equal(expectErr.Error())) - })).Should(Succeed()) - }) - }) - }) When("with backup repo", func() { - var sp *storagev1alpha1.StorageProvider - var repo *dpv1alpha1.BackupRepo - var repoPVCName string - var backupTool *dpv1alpha1.BackupTool - - createBackupPolicy := func(pvcName string, repoName string) *dpv1alpha1.BackupPolicy { - builder := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddDataFilePolicy(). - SetPVC(pvcName). - SetBackupRepo(repoName). - SetBackupToolName(backupTool.Name). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName) - return builder.Create(&testCtx).GetObject() - } - - createBackup := func(policy *dpv1alpha1.BackupPolicy, change func(*dpv1alpha1.Backup)) *dpv1alpha1.Backup { - if change == nil { - change = func(*dpv1alpha1.Backup) {} // set nop - } - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). 
- SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). - Apply(change). - Create(&testCtx).GetObject() - return backup - } - - createStorageProvider := func() *storagev1alpha1.StorageProvider { - sp := testapps.CreateCustomizedObj(&testCtx, "backup/storageprovider.yaml", - &storagev1alpha1.StorageProvider{}) - // the storage provider controller is not running, so set the status manually - Expect(testapps.ChangeObjStatus(&testCtx, sp, func() { - sp.Status.Phase = storagev1alpha1.StorageProviderReady - })).Should(Succeed()) - return sp - } - - createRepo := func(change func(repo *dpv1alpha1.BackupRepo)) (*dpv1alpha1.BackupRepo, string) { - repo := testapps.CreateCustomizedObj(&testCtx, "backup/backuprepo.yaml", - &dpv1alpha1.BackupRepo{}, func(obj *dpv1alpha1.BackupRepo) { - obj.Spec.StorageProviderRef = sp.Name - if change != nil { - change(obj) - } - }) - var repoPVCName string - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(repo), func(g Gomega, repo *dpv1alpha1.BackupRepo) { - g.Expect(repo.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupRepoReady)) - g.Expect(repo.Status.BackupPVCName).ShouldNot(BeEmpty()) - repoPVCName = repo.Status.BackupPVCName - })).Should(Succeed()) - return repo, repoPVCName - } + var ( + repoPVCName string + sp *storagev1alpha1.StorageProvider + repo *dpv1alpha1.BackupRepo + ) BeforeEach(func() { By("creating backup repo") - sp = createStorageProvider() - repo, repoPVCName = createRepo(nil) - - By("creating backup tool") - backupTool = testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) + sp = testdp.NewFakeStorageProvider(&testCtx, nil) + repo, repoPVCName = testdp.NewFakeBackupRepo(&testCtx, nil) - viper.SetDefault(constant.CfgKeyBackupPVCName, "") + By("creating actionSet") + _ = testdp.NewFakeActionSet(&testCtx) }) Context("explicitly specify backup repo", func() { It("should use the backup repo specified 
in the policy", func() { By("creating backup policy and backup") - policy := createBackupPolicy("", repo.Name) - backup := createBackup(policy, nil) + _ = testdp.NewFakeBackupPolicy(&testCtx, nil) + backup := testdp.NewFakeBackup(&testCtx, nil) By("checking backup, it should use the PVC from the backup repo") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { g.Expect(backup.Status.PersistentVolumeClaimName).Should(BeEquivalentTo(repoPVCName)) @@ -804,10 +402,14 @@ var _ = Describe("Backup Controller test", func() { It("should use the backup repo specified in the backup object", func() { By("creating a second backup repo") - repo2, repoPVCName2 := createRepo(nil) + repo2, repoPVCName2 := testdp.NewFakeBackupRepo(&testCtx, func(repo *dpv1alpha1.BackupRepo) { + repo.Name += "2" + }) By("creating backup policy and backup") - policy := createBackupPolicy("", repo.Name) - backup := createBackup(policy, func(backup *dpv1alpha1.Backup) { + _ = testdp.NewFakeBackupPolicy(&testCtx, func(backupPolicy *dpv1alpha1.BackupPolicy) { + backupPolicy.Spec.BackupRepoName = &repo.Name + }) + backup := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { if backup.Labels == nil { backup.Labels = map[string]string{} } @@ -823,8 +425,10 @@ var _ = Describe("Backup Controller test", func() { Context("default backup repo", func() { It("should use the default backup repo if it's not specified", func() { By("creating backup policy and backup") - policy := createBackupPolicy("", "") - backup := createBackup(policy, nil) + _ = testdp.NewFakeBackupPolicy(&testCtx, func(backupPolicy *dpv1alpha1.BackupPolicy) { + backupPolicy.Spec.BackupRepoName = nil + }) + backup := testdp.NewFakeBackup(&testCtx, nil) By("checking backup, it should use the PVC from the backup repo") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { 
g.Expect(backup.Status.PersistentVolumeClaimName).Should(BeEquivalentTo(repoPVCName)) @@ -833,15 +437,17 @@ var _ = Describe("Backup Controller test", func() { It("should associate the default backup repo with the backup object", func() { By("creating backup policy and backup") - policy := createBackupPolicy("", "") - backup := createBackup(policy, nil) + _ = testdp.NewFakeBackupPolicy(&testCtx, func(backupPolicy *dpv1alpha1.BackupPolicy) { + backupPolicy.Spec.BackupRepoName = nil + }) + backup := testdp.NewFakeBackup(&testCtx, nil) By("checking backup labels") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { g.Expect(backup.Labels[dataProtectionBackupRepoKey]).Should(BeEquivalentTo(repo.Name)) })).Should(Succeed()) By("creating backup2") - backup2 := createBackup(policy, func(backup *dpv1alpha1.Backup) { + backup2 := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { backup.Name += "2" }) By("checking backup2 labels") @@ -853,23 +459,28 @@ var _ = Describe("Backup Controller test", func() { Context("multiple default backup repos", func() { var repoPVCName2 string - var policy *dpv1alpha1.BackupPolicy BeforeEach(func() { By("creating a second backup repo") - sp2 := createStorageProvider() - _, repoPVCName2 = createRepo(func(repo *dpv1alpha1.BackupRepo) { + sp2 := testdp.NewFakeStorageProvider(&testCtx, func(sp *storagev1alpha1.StorageProvider) { + sp.Name += "2" + }) + _, repoPVCName2 = testdp.NewFakeBackupRepo(&testCtx, func(repo *dpv1alpha1.BackupRepo) { + repo.Name += "2" repo.Spec.StorageProviderRef = sp2.Name }) By("creating backup policy") - policy = createBackupPolicy("", "") + _ = testdp.NewFakeBackupPolicy(&testCtx, func(backupPolicy *dpv1alpha1.BackupPolicy) { + // set backupRepoName in backupPolicy to nil to make it use the default backup repo + backupPolicy.Spec.BackupRepoName = nil + }) }) It("should fail if there are multiple default backup repos", func() { 
By("creating backup") - backup := createBackup(policy, nil) + backup := testdp.NewFakeBackup(&testCtx, nil) By("checking backup, it should fail because there are multiple default backup repos") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { - g.Expect(backup.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupFailed)) + g.Expect(backup.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupPhaseFailed)) g.Expect(backup.Status.FailureReason).Should(ContainSubstring("multiple default BackupRepo found")) })).Should(Succeed()) }) @@ -885,7 +496,7 @@ var _ = Describe("Backup Controller test", func() { g.Expect(repo.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupRepoFailed)) })).Should(Succeed()) By("creating backup") - backup := createBackup(policy, func(backup *dpv1alpha1.Backup) { + backup := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { backup.Name = "second-backup" }) By("checking backup, it should use the PVC from repo2") @@ -897,53 +508,22 @@ var _ = Describe("Backup Controller test", func() { }) Context("no backup repo available", func() { - It("should fallback to the legacy PVC settings", func() { + It("should throw error", func() { By("making the backup repo as non-default") Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(repo), func(repo *dpv1alpha1.BackupRepo) { - delete(repo.Annotations, constant.DefaultBackupRepoAnnotationKey) + delete(repo.Annotations, dptypes.DefaultBackupRepoAnnotationKey) })).Should(Succeed()) By("creating backup") - policy := createBackupPolicy("", "") - backup := createBackup(policy, nil) - By("checking backup, it should fail because neither the backup repo nor the legacy PVC are available") + _ = testdp.NewFakeBackupPolicy(&testCtx, func(backupPolicy *dpv1alpha1.BackupPolicy) { + backupPolicy.Spec.BackupRepoName = nil + }) + backup := testdp.NewFakeBackup(&testCtx, nil) + By("checking backup, it should fail because 
the backup repo are not available") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, backup *dpv1alpha1.Backup) { - g.Expect(backup.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupFailed)) - g.Expect(backup.Status.FailureReason).Should(ContainSubstring("the persistentVolumeClaim name of spec.datafile is empty")) + g.Expect(backup.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupPhaseFailed)) + g.Expect(backup.Status.FailureReason).Should(ContainSubstring("no default BackupRepo found")) })).Should(Succeed()) }) }) }) }) - -func patchK8sJobStatus(key types.NamespacedName, jobStatus batchv1.JobConditionType) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(fetched *batchv1.Job) { - jobCondition := batchv1.JobCondition{Type: jobStatus} - fetched.Status.Conditions = append(fetched.Status.Conditions, jobCondition) - })).Should(Succeed()) -} - -func patchVolumeSnapshotStatus(key types.NamespacedName, readyToUse bool) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(fetched *snapshotv1.VolumeSnapshot) { - snapStatus := snapshotv1.VolumeSnapshotStatus{ReadyToUse: &readyToUse} - fetched.Status = &snapStatus - })).Should(Succeed()) -} - -func patchBackupPolicySpecBackupStatusUpdates(key types.NamespacedName) { - Eventually(testapps.GetAndChangeObj(&testCtx, key, func(fetched *dpv1alpha1.BackupPolicy) { - fetched.Spec.Snapshot.BackupStatusUpdates = []dpv1alpha1.BackupStatusUpdate{ - { - Path: "manifests.backupLog", - ContainerName: "postgresql", - Script: "echo {\"startTime\": \"2023-03-01T00:00:00Z\", \"stopTime\": \"2023-03-01T00:00:00Z\"}", - UpdateStage: dpv1alpha1.PRE, - }, - { - Path: "manifests.backupTool", - ContainerName: "postgresql", - Script: "echo {\"FilePath\": \"/backup/test.file\"}", - UpdateStage: dpv1alpha1.POST, - }, - } - })).Should(Succeed()) -} diff --git a/controllers/dataprotection/backuppolicy_controller.go b/controllers/dataprotection/backuppolicy_controller.go index 
4e2e6e8df8e..1ee9e499e15 100644 --- a/controllers/dataprotection/backuppolicy_controller.go +++ b/controllers/dataprotection/backuppolicy_controller.go @@ -21,44 +21,22 @@ package dataprotection import ( "context" - "encoding/json" - "reflect" - "sort" - "strings" - "time" - "github.com/leaanthony/debme" - "golang.org/x/exp/slices" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper "github.com/apecloud/kubeblocks/internal/viperx" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) // BackupPolicyReconciler reconciles a BackupPolicy object type BackupPolicyReconciler struct { client.Client - Scheme *k8sruntime.Scheme + Scheme *runtime.Scheme Recorder record.EventRecorder } @@ -66,22 +44,9 @@ type BackupPolicyReconciler struct { // +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backuppolicies/status,verbs=get;update;patch // 
+kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backuppolicies/finalizers,verbs=update -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get -// +kubebuilder:rbac:groups=batch,resources=cronjobs/finalizers,verbs=update;patch - // Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupPolicy object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile +// move the current state of the backuppolicy closer to the desired state. func (r *BackupPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // NOTES: - // setup common request context reqCtx := intctrlutil.RequestCtx{ Ctx: ctx, Req: req, @@ -89,611 +54,51 @@ func (r *BackupPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request Recorder: r.Recorder, } - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} + backupPolicy := &dpv1alpha1.BackupPolicy{} if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, backupPolicy); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - originBackupPolicy := backupPolicy.DeepCopy() - // handle finalizer - res, err := intctrlutil.HandleCRDeletion(reqCtx, r, backupPolicy, dataProtectionFinalizerName, func() (*ctrl.Result, error) { - return nil, r.deleteExternalResources(reqCtx, backupPolicy) - }) + res, err := intctrlutil.HandleCRDeletion(reqCtx, r, backupPolicy, dptypes.DataProtectionFinalizerName, + func() (*ctrl.Result, error) { + return nil, 
r.deleteExternalResources(reqCtx, backupPolicy) + }) if res != nil { return *res, err } - // try to remove expired or oldest backups, triggered by cronjob controller - if err = r.removeExpiredBackups(reqCtx); err != nil { - return r.patchStatusFailed(reqCtx, backupPolicy, "RemoveExpiredBackupsFailed", err) + if backupPolicy.Status.ObservedGeneration == backupPolicy.Generation && + backupPolicy.Status.Phase.IsAvailable() { + return ctrl.Result{}, nil } - if err = r.handleSnapshotPolicy(reqCtx, backupPolicy); err != nil { - return r.patchStatusFailed(reqCtx, backupPolicy, "HandleSnapshotPolicyFailed", err) + patchStatus := func(phase dpv1alpha1.Phase, message string) error { + patch := client.MergeFrom(backupPolicy.DeepCopy()) + backupPolicy.Status.Phase = phase + backupPolicy.Status.Message = message + backupPolicy.Status.ObservedGeneration = backupPolicy.Generation + return r.Status().Patch(ctx, backupPolicy, patch) } - if err = r.handleDatafilePolicy(reqCtx, backupPolicy); err != nil { - return r.patchStatusFailed(reqCtx, backupPolicy, "HandleFullPolicyFailed", err) - } + // TODO(ldm): validate backup policy - if err = r.handleLogfilePolicy(reqCtx, backupPolicy); err != nil { - return r.patchStatusFailed(reqCtx, backupPolicy, "HandleIncrementalPolicyFailed", err) + if err = patchStatus(dpv1alpha1.AvailablePhase, ""); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - - return r.patchStatusAvailable(reqCtx, originBackupPolicy, backupPolicy) + intctrlutil.RecordCreatedEvent(r.Recorder, backupPolicy) + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *BackupPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dataprotectionv1alpha1.BackupPolicy{}). - Watches(&dataprotectionv1alpha1.Backup{}, r.backupDeleteHandler(), - builder.WithPredicates(predicate.NewPredicateFuncs(filterCreatedByPolicy))). 
- WithOptions(controller.Options{ - MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), - }). + For(&dpv1alpha1.BackupPolicy{}). Complete(r) } -func (r *BackupPolicyReconciler) backupDeleteHandler() *handler.Funcs { - return &handler.Funcs{ - DeleteFunc: func(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) { - backup := event.Object.(*dataprotectionv1alpha1.Backup) - backupPolicy := &dataprotectionv1alpha1.BackupPolicy{} - if err := r.Client.Get(ctx, types.NamespacedName{Name: backup.Spec.BackupPolicyName, Namespace: backup.Namespace}, backupPolicy); err != nil { - return - } - backupType := backup.Spec.BackupType - // if not refer the backupTool, skip - commonPolicy := backupPolicy.Spec.GetCommonPolicy(backupType) - if commonPolicy == nil { - return - } - // if not enable the schedule, skip - schedulerPolicy := backupPolicy.Spec.GetCommonSchedulePolicy(backupType) - if schedulerPolicy != nil && !schedulerPolicy.Enable { - return - } - backupTool := &dataprotectionv1alpha1.BackupTool{} - if err := r.Client.Get(ctx, types.NamespacedName{Name: commonPolicy.BackupToolName}, backupTool); err != nil { - return - } - if backupTool.Spec.DeployKind != dataprotectionv1alpha1.DeployKindStatefulSet { - return - } - _ = r.reconcileForStatefulSetKind(ctx, backupPolicy, backupType, schedulerPolicy.CronExpression) - }, - } -} - -func (r *BackupPolicyReconciler) deleteExternalResources(reqCtx intctrlutil.RequestCtx, backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - // delete cronjob resource - cronJobList := &batchv1.CronJobList{} - if err := r.Client.List(reqCtx.Ctx, cronJobList, - client.InNamespace(viper.GetString(constant.CfgKeyCtrlrMgrNS)), - client.MatchingLabels{ - dataProtectionLabelBackupPolicyKey: backupPolicy.Name, - constant.AppManagedByLabelKey: constant.AppName, - }, - ); err != nil { - return err - } - for _, cronjob := range cronJobList.Items { - if err := 
r.removeCronJobFinalizer(reqCtx, &cronjob); err != nil { - return err - } - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &cronjob); err != nil { - // failed delete k8s job, return error info. - return err - } - } - // notice running backup to completed - backup := &dataprotectionv1alpha1.Backup{} - for _, v := range []dataprotectionv1alpha1.BackupType{dataprotectionv1alpha1.BackupTypeDataFile, - dataprotectionv1alpha1.BackupTypeLogFile, dataprotectionv1alpha1.BackupTypeSnapshot} { - if err := r.Client.Get(reqCtx.Ctx, types.NamespacedName{Namespace: backupPolicy.Namespace, - Name: getCreatedCRNameByBackupPolicy(backupPolicy, v), - }, backup); err != nil { - if apierrors.IsNotFound(err) { - continue - } - return err - } - patch := client.MergeFrom(backup.DeepCopy()) - backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted - backup.Status.CompletionTimestamp = &metav1.Time{Time: time.Now().UTC()} - if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { - return err - } - } - return nil -} - -// patchStatusAvailable patches backup policy status phase to available. 
-func (r *BackupPolicyReconciler) patchStatusAvailable(reqCtx intctrlutil.RequestCtx, - originBackupPolicy, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) (ctrl.Result, error) { - if !reflect.DeepEqual(originBackupPolicy.Spec, backupPolicy.Spec) { - if err := r.Client.Update(reqCtx.Ctx, backupPolicy); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - } - // update status phase - if backupPolicy.Status.Phase != dataprotectionv1alpha1.PolicyAvailable || - backupPolicy.Status.ObservedGeneration != backupPolicy.Generation { - patch := client.MergeFrom(backupPolicy.DeepCopy()) - backupPolicy.Status.ObservedGeneration = backupPolicy.Generation - backupPolicy.Status.Phase = dataprotectionv1alpha1.PolicyAvailable - backupPolicy.Status.FailureReason = "" - if err := r.Client.Status().Patch(reqCtx.Ctx, backupPolicy, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - } - return intctrlutil.Reconciled() -} - -// patchStatusFailed patches backup policy status phase to failed. 
-func (r *BackupPolicyReconciler) patchStatusFailed(reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - reason string, - err error) (ctrl.Result, error) { - if intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeRequeue) { - return intctrlutil.RequeueAfter(reconcileInterval, reqCtx.Log, "") - } - backupPolicyDeepCopy := backupPolicy.DeepCopy() - backupPolicy.Status.Phase = dataprotectionv1alpha1.PolicyFailed - backupPolicy.Status.FailureReason = err.Error() - if !reflect.DeepEqual(backupPolicy.Status, backupPolicyDeepCopy.Status) { - if patchErr := r.Client.Status().Patch(reqCtx.Ctx, backupPolicy, client.MergeFrom(backupPolicyDeepCopy)); patchErr != nil { - return intctrlutil.RequeueWithError(patchErr, reqCtx.Log, "") - } - } - r.Recorder.Event(backupPolicy, corev1.EventTypeWarning, reason, err.Error()) - return intctrlutil.RequeueWithError(err, reqCtx.Log, "") -} - -func (r *BackupPolicyReconciler) removeExpiredBackups(reqCtx intctrlutil.RequestCtx) error { - backups := dataprotectionv1alpha1.BackupList{} - if err := r.Client.List(reqCtx.Ctx, &backups, - client.InNamespace(reqCtx.Req.Namespace)); err != nil { - return err - } - now := metav1.Now() - for _, item := range backups.Items { - // ignore retained backup. - if strings.EqualFold(item.GetLabels()[constant.BackupProtectionLabelKey], constant.BackupRetain) { - continue - } - if item.Status.Expiration != nil && item.Status.Expiration.Before(&now) { - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &item); err != nil { - // failed delete backups, return error info. - return err - } - } - } - return nil -} - -// removeOldestBackups removes old backups according to backupsHistoryLimit policy. 
-func (r *BackupPolicyReconciler) removeOldestBackups(reqCtx intctrlutil.RequestCtx, - backupPolicyName string, - backupType dataprotectionv1alpha1.BackupType, - backupsHistoryLimit int32) error { - if backupsHistoryLimit == 0 { - return nil - } - matchLabels := map[string]string{ - dataProtectionLabelBackupPolicyKey: backupPolicyName, - dataProtectionLabelBackupTypeKey: string(backupType), - dataProtectionLabelAutoBackupKey: "true", - } - backups := dataprotectionv1alpha1.BackupList{} - if err := r.Client.List(reqCtx.Ctx, &backups, - client.InNamespace(reqCtx.Req.Namespace), - client.MatchingLabels(matchLabels)); err != nil { - return err - } - // filter final state backups only - backupItems := []dataprotectionv1alpha1.Backup{} - for _, item := range backups.Items { - if item.Status.Phase == dataprotectionv1alpha1.BackupCompleted || - item.Status.Phase == dataprotectionv1alpha1.BackupFailed { - backupItems = append(backupItems, item) - } - } - numToDelete := len(backupItems) - int(backupsHistoryLimit) - if numToDelete <= 0 { - return nil - } - sort.Sort(byBackupStartTime(backupItems)) - for i := 0; i < numToDelete; i++ { - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &backupItems[i]); err != nil { - // failed delete backups, return error info. - return err - } - } - return nil -} - -// reconcileForStatefulSetKind reconciles the backup which is controlled by backupPolicy. 
-func (r *BackupPolicyReconciler) reconcileForStatefulSetKind( - ctx context.Context, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - backType dataprotectionv1alpha1.BackupType, - cronExpression string) error { - backupName := getCreatedCRNameByBackupPolicy(backupPolicy, backType) - backup := &dataprotectionv1alpha1.Backup{} - exists, err := intctrlutil.CheckResourceExists(ctx, r.Client, types.NamespacedName{Name: backupName, Namespace: backupPolicy.Namespace}, backup) - if err != nil { - return err - } - patch := client.MergeFrom(backup.DeepCopy()) - backup.Name = backupName - backup.Namespace = backupPolicy.Namespace - if backup.Labels == nil { - backup.Labels = map[string]string{} - } - backup.Labels[constant.AppManagedByLabelKey] = constant.AppName - backup.Labels[dataProtectionLabelBackupPolicyKey] = backupPolicy.Name - backup.Labels[dataProtectionLabelBackupTypeKey] = string(backType) - backup.Labels[dataProtectionLabelAutoBackupKey] = trueVal - if !exists { - if cronExpression == "" { - return nil - } - backup.Spec.BackupType = backType - backup.Spec.BackupPolicyName = backupPolicy.Name - return intctrlutil.IgnoreIsAlreadyExists(r.Client.Create(ctx, backup)) - } - - // notice to reconcile backup CR - if cronExpression != "" && slices.Contains([]dataprotectionv1alpha1.BackupPhase{ - dataprotectionv1alpha1.BackupCompleted, dataprotectionv1alpha1.BackupFailed}, - backup.Status.Phase) { - // if schedule is enabled and backup already is completed, update phase to New - backup.Status.Phase = dataprotectionv1alpha1.BackupNew - backup.Status.FailureReason = "" - return r.Client.Status().Patch(ctx, backup, patch) - } - if backup.Annotations == nil { - backup.Annotations = map[string]string{} - } - backup.Annotations[constant.ReconcileAnnotationKey] = time.Now().Format(time.RFC3339Nano) - return r.Client.Patch(ctx, backup, patch) -} - -// buildCronJob builds cronjob from backup policy. 
-func (r *BackupPolicyReconciler) buildCronJob( - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - target dataprotectionv1alpha1.TargetCluster, - cronExpression string, - backType dataprotectionv1alpha1.BackupType, - cronJobName string) (*batchv1.CronJob, error) { - tplFile := "cronjob.cue" - cueFS, _ := debme.FS(cueTemplates, "cue") - cueTpl, err := intctrlutil.NewCUETplFromBytes(cueFS.ReadFile(tplFile)) - if err != nil { - return nil, err - } - tolerationPodSpec := corev1.PodSpec{} - if err = addTolerations(&tolerationPodSpec); err != nil { - return nil, err - } - var ttl metav1.Duration - if backupPolicy.Spec.Retention != nil && backupPolicy.Spec.Retention.TTL != nil { - ttl = metav1.Duration{Duration: dataprotectionv1alpha1.ToDuration(backupPolicy.Spec.Retention.TTL)} - } - cueValue := intctrlutil.NewCUEBuilder(*cueTpl) - if cronJobName == "" { - cronJobName = getCreatedCRNameByBackupPolicy(backupPolicy, backType) - } - options := backupPolicyOptions{ - Name: cronJobName, - BackupPolicyName: backupPolicy.Name, - Namespace: backupPolicy.Namespace, - Cluster: target.LabelsSelector.MatchLabels[constant.AppInstanceLabelKey], - Schedule: cronExpression, - TTL: ttl, - BackupType: string(backType), - ServiceAccount: viper.GetString("KUBEBLOCKS_SERVICEACCOUNT_NAME"), - MgrNamespace: viper.GetString(constant.CfgKeyCtrlrMgrNS), - Image: viper.GetString(constant.KBToolsImage), - Tolerations: &tolerationPodSpec, - } - backupPolicyOptionsByte, err := json.Marshal(options) - if err != nil { - return nil, err - } - if err = cueValue.Fill("options", backupPolicyOptionsByte); err != nil { - return nil, err - } - cuePath := "cronjob" - if backType == dataprotectionv1alpha1.BackupTypeLogFile { - cuePath = "cronjob_logfile" - } - cronjobByte, err := cueValue.Lookup(cuePath) - if err != nil { - return nil, err - } - - cronjob := &batchv1.CronJob{} - if err = json.Unmarshal(cronjobByte, cronjob); err != nil { - return nil, err - } - - controllerutil.AddFinalizer(cronjob, 
dataProtectionFinalizerName) - - // set labels - for k, v := range backupPolicy.Labels { - if cronjob.Labels == nil { - cronjob.SetLabels(map[string]string{}) - } - cronjob.Labels[k] = v - } - cronjob.Labels[dataProtectionLabelBackupPolicyKey] = backupPolicy.Name - cronjob.Labels[dataProtectionLabelBackupTypeKey] = string(backType) - return cronjob, nil -} - -func (r *BackupPolicyReconciler) removeCronJobFinalizer(reqCtx intctrlutil.RequestCtx, cronjob *batchv1.CronJob) error { - patch := client.MergeFrom(cronjob.DeepCopy()) - controllerutil.RemoveFinalizer(cronjob, dataProtectionFinalizerName) - return r.Patch(reqCtx.Ctx, cronjob, patch) -} - -// reconcileCronJob will create/delete/patch cronjob according to cronExpression and policy changes. -func (r *BackupPolicyReconciler) reconcileCronJob(reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - basePolicy dataprotectionv1alpha1.BasePolicy, - schedulePolicy *dataprotectionv1alpha1.SchedulePolicy, - backType dataprotectionv1alpha1.BackupType) error { - // get cronjob from labels - cronJob := &batchv1.CronJob{} - cronJobList := &batchv1.CronJobList{} - if err := r.Client.List(reqCtx.Ctx, cronJobList, - client.InNamespace(viper.GetString(constant.CfgKeyCtrlrMgrNS)), - client.MatchingLabels{ - dataProtectionLabelBackupPolicyKey: backupPolicy.Name, - dataProtectionLabelBackupTypeKey: string(backType), - constant.AppManagedByLabelKey: constant.AppName, - }, - ); err != nil { - return err - } else if len(cronJobList.Items) > 0 { - cronJob = &cronJobList.Items[0] - } - if schedulePolicy == nil || !schedulePolicy.Enable { - if len(cronJob.Name) != 0 { - // delete the old cronjob. 
- if err := r.removeCronJobFinalizer(reqCtx, cronJob); err != nil { - return err - } - return r.Client.Delete(reqCtx.Ctx, cronJob) - } - // if no cron expression, return - return nil - } - cronjobProto, err := r.buildCronJob(backupPolicy, basePolicy.Target, schedulePolicy.CronExpression, backType, cronJob.Name) - if err != nil { - return err - } - - if backupPolicy.Spec.Schedule.StartingDeadlineMinutes != nil { - startingDeadlineSeconds := *backupPolicy.Spec.Schedule.StartingDeadlineMinutes * 60 - cronjobProto.Spec.StartingDeadlineSeconds = &startingDeadlineSeconds - } - if len(cronJob.Name) == 0 { - // if no cronjob, create it. - return r.Client.Create(reqCtx.Ctx, cronjobProto) - } - // sync the cronjob with the current backup policy configuration. - patch := client.MergeFrom(cronJob.DeepCopy()) - cronJob.Spec.StartingDeadlineSeconds = cronjobProto.Spec.StartingDeadlineSeconds - cronJob.Spec.JobTemplate.Spec.BackoffLimit = &basePolicy.OnFailAttempted - cronJob.Spec.JobTemplate.Spec.Template = cronjobProto.Spec.JobTemplate.Spec.Template - cronJob.Spec.Schedule = schedulePolicy.CronExpression - return r.Client.Patch(reqCtx.Ctx, cronJob, patch) -} - -// handlePolicy handles backup policy. -func (r *BackupPolicyReconciler) handlePolicy(reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - basePolicy dataprotectionv1alpha1.BasePolicy, - schedulePolicy *dataprotectionv1alpha1.SchedulePolicy, - backType dataprotectionv1alpha1.BackupType) error { - - if err := r.reconfigure(reqCtx, backupPolicy, basePolicy, backType); err != nil { - return err - } - // create/delete/patch cronjob workload - if err := r.reconcileCronJob(reqCtx, backupPolicy, basePolicy, schedulePolicy, backType); err != nil { - return err - } - return r.removeOldestBackups(reqCtx, backupPolicy.Name, backType, basePolicy.BackupsHistoryLimit) -} - -// handleSnapshotPolicy handles snapshot policy. 
-func (r *BackupPolicyReconciler) handleSnapshotPolicy( - reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - if backupPolicy.Spec.Snapshot == nil { - // TODO delete cronjob if exists - return nil - } - return r.handlePolicy(reqCtx, backupPolicy, backupPolicy.Spec.Snapshot.BasePolicy, - backupPolicy.Spec.Schedule.Snapshot, dataprotectionv1alpha1.BackupTypeSnapshot) -} - -// handleDatafilePolicy handles datafile policy. -func (r *BackupPolicyReconciler) handleDatafilePolicy( - reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - if backupPolicy.Spec.Datafile == nil { - // TODO delete cronjob if exists - return nil - } - r.setGlobalPersistentVolumeClaim(backupPolicy.Spec.Datafile) - return r.handlePolicy(reqCtx, backupPolicy, backupPolicy.Spec.Datafile.BasePolicy, - backupPolicy.Spec.Schedule.Datafile, dataprotectionv1alpha1.BackupTypeDataFile) -} - -// handleLogFilePolicy handles logfile policy. -func (r *BackupPolicyReconciler) handleLogfilePolicy( - reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - logfile := backupPolicy.Spec.Logfile - if logfile == nil { - return nil - } - backupTool, err := getBackupToolByName(reqCtx, r.Client, logfile.BackupToolName) - if err != nil { - return err - } - r.setGlobalPersistentVolumeClaim(logfile) - schedule := backupPolicy.Spec.Schedule.Logfile - if backupTool.Spec.DeployKind == dataprotectionv1alpha1.DeployKindStatefulSet { - var cronExpression string - if schedule != nil && schedule.Enable { - cronExpression = schedule.CronExpression - } - if err := r.reconfigure(reqCtx, backupPolicy, logfile.BasePolicy, dataprotectionv1alpha1.BackupTypeLogFile); err != nil { - return err - } - return r.reconcileForStatefulSetKind(reqCtx.Ctx, backupPolicy, dataprotectionv1alpha1.BackupTypeLogFile, cronExpression) - } - return r.handlePolicy(reqCtx, backupPolicy, logfile.BasePolicy, schedule, 
dataprotectionv1alpha1.BackupTypeLogFile) -} - -// setGlobalPersistentVolumeClaim sets global config of pvc to common policy. -func (r *BackupPolicyReconciler) setGlobalPersistentVolumeClaim(backupPolicy *dataprotectionv1alpha1.CommonBackupPolicy) { - pvcCfg := backupPolicy.PersistentVolumeClaim - globalPVCName := viper.GetString(constant.CfgKeyBackupPVCName) - if (pvcCfg.Name == nil || len(*pvcCfg.Name) == 0) && globalPVCName != "" { - backupPolicy.PersistentVolumeClaim.Name = &globalPVCName - } - - globalInitCapacity := viper.GetString(constant.CfgKeyBackupPVCInitCapacity) - if pvcCfg.InitCapacity.IsZero() && globalInitCapacity != "" { - backupPolicy.PersistentVolumeClaim.InitCapacity = resource.MustParse(globalInitCapacity) - } -} - -type backupReconfigureRef struct { - Name string `json:"name"` - Key string `json:"key"` - Enable parameterPairs `json:"enable,omitempty"` - Disable parameterPairs `json:"disable,omitempty"` -} - -type parameterPairs map[string][]appsv1alpha1.ParameterPair - -func (r *BackupPolicyReconciler) reconfigure(reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy, - basePolicy dataprotectionv1alpha1.BasePolicy, - backType dataprotectionv1alpha1.BackupType) error { - - reconfigRef := backupPolicy.Annotations[constant.ReconfigureRefAnnotationKey] - if reconfigRef == "" { - return nil - } - configRef := backupReconfigureRef{} - if err := json.Unmarshal([]byte(reconfigRef), &configRef); err != nil { - return err - } - - enable := false - commonSchedule := backupPolicy.Spec.GetCommonSchedulePolicy(backType) - if commonSchedule != nil { - enable = commonSchedule.Enable - } - if backupPolicy.Annotations[constant.LastAppliedConfigAnnotationKey] == "" && !enable { - // disable in the first policy created, no need reconfigure because default configs had been set. 
- return nil - } - configParameters := configRef.Disable - if enable { - configParameters = configRef.Enable - } - if configParameters == nil { - return nil - } - parameters := configParameters[string(backType)] - if len(parameters) == 0 { - // skip reconfigure if not found parameters. - return nil - } - updateParameterPairsBytes, _ := json.Marshal(parameters) - updateParameterPairs := string(updateParameterPairsBytes) - if updateParameterPairs == backupPolicy.Annotations[constant.LastAppliedConfigAnnotationKey] { - // reconcile the config job if finished - return r.reconcileReconfigure(reqCtx, backupPolicy) - } - - ops := appsv1alpha1.OpsRequest{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: backupPolicy.Name + "-", - Namespace: backupPolicy.Namespace, - Labels: map[string]string{ - dataProtectionLabelBackupPolicyKey: backupPolicy.Name, - }, - }, - Spec: appsv1alpha1.OpsRequestSpec{ - Type: appsv1alpha1.ReconfiguringType, - ClusterRef: basePolicy.Target.LabelsSelector.MatchLabels[constant.AppInstanceLabelKey], - Reconfigure: &appsv1alpha1.Reconfigure{ - ComponentOps: appsv1alpha1.ComponentOps{ - ComponentName: basePolicy.Target.LabelsSelector.MatchLabels[constant.KBAppComponentLabelKey], - }, - Configurations: []appsv1alpha1.ConfigurationItem{ - { - Name: configRef.Name, - Keys: []appsv1alpha1.ParameterConfig{ - { - Key: configRef.Key, - Parameters: parameters, - }, - }, - }, - }, - }, - }, - } - if err := r.Client.Create(reqCtx.Ctx, &ops); err != nil { - return err - } - - r.Recorder.Eventf(backupPolicy, corev1.EventTypeNormal, "Reconfiguring", "update config %s", updateParameterPairs) - patch := client.MergeFrom(backupPolicy.DeepCopy()) - if backupPolicy.Annotations == nil { - backupPolicy.Annotations = map[string]string{} - } - backupPolicy.Annotations[constant.LastAppliedConfigAnnotationKey] = updateParameterPairs - if err := r.Client.Patch(reqCtx.Ctx, backupPolicy, patch); err != nil { - return err - } - return 
intctrlutil.NewErrorf(intctrlutil.ErrorTypeRequeue, "requeue to waiting for ops %s finished.", ops.Name) -} - -func (r *BackupPolicyReconciler) reconcileReconfigure(reqCtx intctrlutil.RequestCtx, - backupPolicy *dataprotectionv1alpha1.BackupPolicy) error { - - opsList := appsv1alpha1.OpsRequestList{} - if err := r.Client.List(reqCtx.Ctx, &opsList, - client.InNamespace(backupPolicy.Namespace), - client.MatchingLabels{dataProtectionLabelBackupPolicyKey: backupPolicy.Name}); err != nil { - return err - } - if len(opsList.Items) > 0 { - sort.Slice(opsList.Items, func(i, j int) bool { - return opsList.Items[j].CreationTimestamp.Before(&opsList.Items[i].CreationTimestamp) - }) - latestOps := opsList.Items[0] - if latestOps.Status.Phase == appsv1alpha1.OpsFailedPhase { - return intctrlutil.NewErrorf(intctrlutil.ErrorTypeReconfigureFailed, "ops failed %s", latestOps.Name) - } else if latestOps.Status.Phase != appsv1alpha1.OpsSucceedPhase { - return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRequeue, "requeue to waiting for ops %s finished.", latestOps.Name) - } - } +func (r *BackupPolicyReconciler) deleteExternalResources( + _ intctrlutil.RequestCtx, + _ *dpv1alpha1.BackupPolicy) error { return nil } diff --git a/controllers/dataprotection/backuppolicy_controller_test.go b/controllers/dataprotection/backuppolicy_controller_test.go index 289f3673660..5a44f8d1266 100644 --- a/controllers/dataprotection/backuppolicy_controller_test.go +++ b/controllers/dataprotection/backuppolicy_controller_test.go @@ -20,550 +20,50 @@ along with this program. If not, see . package dataprotection import ( - "fmt" - "time" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" - viper "github.com/apecloud/kubeblocks/internal/viperx" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" ) -var _ = Describe("Backup Policy Controller", func() { - const clusterName = "wesql-cluster" - const componentName = "replicasets-primary" - const containerName = "mysql" - const defaultPVCSize = "1Gi" - const backupPolicyName = "test-backup-policy" - const backupRemotePVCName = "backup-remote-pvc" - const defaultSchedule = "0 3 * * *" - const defaultTTL = "7d" - const backupNamePrefix = "test-backup-job-" - const mgrNamespace = "kube-system" - - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, testCtx.DefaultNamespace) - +var _ = Describe("BackupPolicy Controller test", func() { cleanEnv := func() { // must wait till resources deleted and no longer existed before the testcases start, // otherwise if later it needs to create some new resource objects with the same name, // in race conditions, it will find the existence of old objects, resulting failure to // create the new objects. 
By("clean resources") - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, mgrNamespace) - // delete rest mocked objects inNS := client.InNamespace(testCtx.DefaultNamespace) ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced - testapps.ClearResources(&testCtx, intctrlutil.ClusterSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.BackupPolicySignature, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.BackupSignature, true, inNS) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.JobSignature, true, inNS) - testapps.ClearResources(&testCtx, intctrlutil.CronJobSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.SecretSignature, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.PersistentVolumeClaimSignature, true, inNS) - // mgr namespaced - inMgrNS := client.InNamespace(mgrNamespace) - testapps.ClearResources(&testCtx, intctrlutil.CronJobSignature, inMgrNS, ml) // non-namespaced - testapps.ClearResources(&testCtx, intctrlutil.BackupToolSignature, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.ActionSetSignature, true, ml) + + // namespaced + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.BackupPolicySignature, true, inNS) } BeforeEach(func() { cleanEnv() - - By("By mocking a statefulset") - sts := testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, clusterName+"-"+componentName, clusterName, componentName). - AddAppInstanceLabel(clusterName). - AddContainer(corev1.Container{Name: containerName, Image: testapps.ApeCloudMySQLImage}). 
- AddVolumeClaimTemplate(corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{Name: testapps.DataVolumeName}, - Spec: testapps.NewPVC(defaultPVCSize), - }).Create(&testCtx).GetObject() - - By("By mocking a pod belonging to the statefulset") - pod := testapps.NewPodFactory(testCtx.DefaultNamespace, sts.Name+"-0"). - AddAppInstanceLabel(clusterName). - AddContainer(corev1.Container{Name: containerName, Image: testapps.ApeCloudMySQLImage}). - Create(&testCtx).GetObject() - - By("By mocking a pvc belonging to the pod") - _ = testapps.NewPersistentVolumeClaimFactory( - testCtx.DefaultNamespace, "data-"+pod.Name, clusterName, componentName, "data"). - SetStorage("1Gi"). - Create(&testCtx) }) - AfterEach(cleanEnv) - - When("creating backup policy with default settings", func() { - var backupToolName string - getCronjobKey := func(backupType dpv1alpha1.BackupType) types.NamespacedName { - return types.NamespacedName{ - Name: fmt.Sprintf("%s-%s-%s", backupPolicyName, testCtx.DefaultNamespace, backupType), - Namespace: viper.GetString(constant.CfgKeyCtrlrMgrNS), - } - } - - BeforeEach(func() { - viper.Set(constant.CfgKeyCtrlrMgrNS, mgrNamespace) - viper.Set(constant.CfgKeyCtrlrMgrAffinity, - "{\"nodeAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"preference\":{\"matchExpressions\":[{\"key\":\"kb-controller\",\"operator\":\"In\",\"values\":[\"true\"]}]},\"weight\":100}]}}") - viper.Set(constant.CfgKeyCtrlrMgrTolerations, - "[{\"key\":\"key1\", \"operator\": \"Exists\", \"effect\": \"NoSchedule\"}]") - viper.Set(constant.CfgKeyCtrlrMgrNodeSelector, "{\"beta.kubernetes.io/arch\":\"amd64\"}") - - By("By creating a backupTool") - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) - backupToolName = backupTool.Name - }) - - AfterEach(func() { - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, testCtx.DefaultNamespace) - viper.Set(constant.CfgKeyCtrlrMgrAffinity, "") - 
viper.Set(constant.CfgKeyCtrlrMgrTolerations, "") - viper.Set(constant.CfgKeyCtrlrMgrNodeSelector, "") - }) - - Context("creates a backup policy", func() { - var backupPolicyKey types.NamespacedName - var backupPolicy *dpv1alpha1.BackupPolicy - var startingDeadlineMinutes int64 = 60 - BeforeEach(func() { - By("By creating a backupPolicy from backupTool: " + backupToolName) - backupPolicy = testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddDataFilePolicy(). - SetBackupToolName(backupToolName). - SetBackupsHistoryLimit(1). - SetSchedule(defaultSchedule, true). - SetScheduleStartingDeadlineMinutes(&startingDeadlineMinutes). - SetTTL(defaultTTL). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetTargetSecretName(clusterName). - AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). - SetPVC(backupRemotePVCName). - Create(&testCtx).GetObject() - backupPolicyKey = client.ObjectKeyFromObject(backupPolicy) - }) - It("should success", func() { - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) - })).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, getCronjobKey(dpv1alpha1.BackupTypeDataFile), func(g Gomega, fetched *batchv1.CronJob) { - g.Expect(fetched.Spec.Schedule).To(Equal(defaultSchedule)) - g.Expect(fetched.Spec.JobTemplate.Spec.Template.Spec.Tolerations).ShouldNot(BeEmpty()) - g.Expect(fetched.Spec.JobTemplate.Spec.Template.Spec.NodeSelector).ShouldNot(BeEmpty()) - g.Expect(fetched.Spec.JobTemplate.Spec.Template.Spec.Affinity).ShouldNot(BeNil()) - g.Expect(fetched.Spec.JobTemplate.Spec.Template.Spec.Affinity.NodeAffinity).ShouldNot(BeNil()) - g.Expect(fetched.Spec.StartingDeadlineSeconds).ShouldNot(BeNil()) - g.Expect(*fetched.Spec.StartingDeadlineSeconds).Should(Equal(startingDeadlineMinutes * 60)) - })).Should(Succeed()) - }) 
- It("limit backups to 1", func() { - now := metav1.Now() - backupStatus := dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupCompleted, - Expiration: &now, - StartTimestamp: &now, - CompletionTimestamp: &now, - } - - autoBackupLabel := map[string]string{ - dataProtectionLabelAutoBackupKey: "true", - dataProtectionLabelBackupPolicyKey: backupPolicyName, - dataProtectionLabelBackupTypeKey: string(dpv1alpha1.BackupTypeDataFile), - } - - By("create a expired backup") - backupExpired := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupNamePrefix). - WithRandomName().AddLabelsInMap(autoBackupLabel). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). - Create(&testCtx).GetObject() - By("create 1st limit backup") - backupOutLimit1 := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupNamePrefix). - WithRandomName().AddLabelsInMap(autoBackupLabel). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). - Create(&testCtx).GetObject() - By("create 2nd limit backup") - backupOutLimit2 := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupNamePrefix). - WithRandomName().AddLabelsInMap(autoBackupLabel). - SetBackupPolicyName(backupPolicyName). - SetBackupType(dpv1alpha1.BackupTypeDataFile). 
- Create(&testCtx).GetObject() - - By("waiting expired backup completed") - backupExpiredKey := client.ObjectKeyFromObject(backupExpired) - patchK8sJobStatus(backupExpiredKey, batchv1.JobComplete) - Eventually(testapps.CheckObj(&testCtx, backupExpiredKey, - func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - By("mock update expired backup status to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(-time.Hour * 24)} - backupStatus.StartTimestamp = backupStatus.Expiration - patchBackupStatus(backupStatus, client.ObjectKeyFromObject(backupExpired)) - - By("waiting 1st limit backup completed") - backupOutLimit1Key := client.ObjectKeyFromObject(backupOutLimit1) - patchK8sJobStatus(backupOutLimit1Key, batchv1.JobComplete) - Eventually(testapps.CheckObj(&testCtx, backupOutLimit1Key, - func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - By("mock update 1st limit backup NOT to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} - backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour)} - patchBackupStatus(backupStatus, client.ObjectKeyFromObject(backupOutLimit1)) - - By("waiting 2nd limit backup completed") - backupOutLimit2Key := client.ObjectKeyFromObject(backupOutLimit2) - patchK8sJobStatus(backupOutLimit2Key, batchv1.JobComplete) - Eventually(testapps.CheckObj(&testCtx, backupOutLimit2Key, - func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - By("mock update 2nd limit backup NOT to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} - backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour * 2)} - patchBackupStatus(backupStatus, client.ObjectKeyFromObject(backupOutLimit2)) - - // trigger the backup policy 
controller through update cronjob - patchCronJobStatus(getCronjobKey(dpv1alpha1.BackupTypeDataFile)) - - By("retain the latest backup") - Eventually(testapps.List(&testCtx, intctrlutil.BackupSignature, - client.MatchingLabels(backupPolicy.Spec.Datafile.Target.LabelsSelector.MatchLabels), - client.InNamespace(backupPolicy.Namespace))).Should(HaveLen(1)) - }) - }) - - Context("creates a backup policy with empty schedule", func() { - var backupPolicyKey types.NamespacedName - var backupPolicy *dpv1alpha1.BackupPolicy - BeforeEach(func() { - By("By creating a backupPolicy from backupTool: " + backupToolName) - backupPolicy = testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - SetBackupToolName(backupToolName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetTargetSecretName(clusterName). - AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). - SetPVC(backupRemotePVCName). - Create(&testCtx).GetObject() - backupPolicyKey = client.ObjectKeyFromObject(backupPolicy) - }) - It("should success", func() { - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) - })).Should(Succeed()) - }) - }) - - Context("creates a backup policy with invalid schedule", func() { - var backupPolicyKey types.NamespacedName - var backupPolicy *dpv1alpha1.BackupPolicy - BeforeEach(func() { - By("By creating a backupPolicy from backupTool: " + backupToolName) - backupPolicy = testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddSnapshotPolicy(). - SetBackupToolName(backupToolName). - SetSchedule("invalid schedule", true). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetTargetSecretName(clusterName). - AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). 
- SetPVC(backupRemotePVCName). - Create(&testCtx).GetObject() - backupPolicyKey = client.ObjectKeyFromObject(backupPolicy) - }) - It("should failed", func() { - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).NotTo(Equal(dpv1alpha1.PolicyAvailable)) - })).Should(Succeed()) - }) - }) - - Context("creating a backupPolicy with secret", func() { - It("creating a backupPolicy with secret", func() { - By("By creating a backupPolicy with empty secret") - randomSecretName := testCtx.GetRandomStr() - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddDataFilePolicy(). - SetBackupToolName(backupToolName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetTargetSecretName(randomSecretName). - AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). - SetPVC(backupRemotePVCName). - Create(&testCtx).GetObject() - backupPolicyKey := client.ObjectKeyFromObject(backupPolicy) - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) - g.Expect(fetched.Spec.Datafile.Target.Secret.Name).To(Equal(randomSecretName)) - })).Should(Succeed()) - }) - }) - - Context("creating a backupPolicy with global backup config", func() { - It("creating a backupPolicy with global backup config", func() { - By("By creating a backupPolicy with empty secret") - pvcName := "backup-data" - pvcInitCapacity := "10Gi" - viper.SetDefault(constant.CfgKeyBackupPVCName, pvcName) - viper.SetDefault(constant.CfgKeyBackupPVCInitCapacity, pvcInitCapacity) - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddDataFilePolicy(). - SetBackupToolName(backupToolName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). 
- AddHookPreCommand("touch /data/mysql/.restore;sync"). - AddHookPostCommand("rm -f /data/mysql/.restore;sync"). - Create(&testCtx).GetObject() - backupPolicyKey := client.ObjectKeyFromObject(backupPolicy) - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) - g.Expect(fetched.Spec.Datafile.PersistentVolumeClaim.Name).ToNot(BeNil()) - g.Expect(*fetched.Spec.Datafile.PersistentVolumeClaim.Name).To(Equal(pvcName)) - g.Expect(fetched.Spec.Datafile.PersistentVolumeClaim.InitCapacity.String()).To(Equal(pvcInitCapacity)) - })).Should(Succeed()) - }) - }) - Context("reconcile a logfile backupPolicy", func() { - It("with reconfigure config and job deployKind", func() { - By("creating a backupPolicy") - pvcName := "backup-data" - pvcInitCapacity := "10Gi" - viper.SetDefault(constant.CfgKeyBackupPVCName, pvcName) - viper.SetDefault(constant.CfgKeyBackupPVCInitCapacity, pvcInitCapacity) - reconfigureRef := `{ - "name": "postgresql-configuration", - "key": "postgresql.conf", - "enable": { - "logfile": [{"key":"archive_command","value":"''"}] - }, - "disable": { - "logfile": [{"key": "archive_command","value":"'/bin/true'"}] - } - }` - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - AddAnnotations(constant.ReconfigureRefAnnotationKey, reconfigureRef). - AddLogfilePolicy(). - SetBackupToolName(backupToolName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - AddSnapshotPolicy(). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). 
- Create(&testCtx).GetObject() - backupPolicyKey := client.ObjectKeyFromObject(backupPolicy) - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) - })).Should(Succeed()) - By("enable schedule for reconfigure") - Eventually(testapps.GetAndChangeObj(&testCtx, backupPolicyKey, func(fetched *dpv1alpha1.BackupPolicy) { - fetched.Spec.Schedule.Logfile = &dpv1alpha1.SchedulePolicy{Enable: true, CronExpression: "* * * * *"} - })).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Annotations[constant.LastAppliedConfigAnnotationKey]).To(Equal(`[{"key":"archive_command","value":"''"}]`)) - })).Should(Succeed()) - - By("disable schedule for reconfigure") - Eventually(testapps.GetAndChangeObj(&testCtx, backupPolicyKey, func(fetched *dpv1alpha1.BackupPolicy) { - fetched.Spec.Schedule.Logfile.Enable = false - })).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, fetched *dpv1alpha1.BackupPolicy) { - g.Expect(fetched.Annotations[constant.LastAppliedConfigAnnotationKey]).To(Equal(`[{"key":"archive_command","value":"'/bin/true'"}]`)) - })).Should(Succeed()) - }) - - It("test logfile backup with a statefulSet deployKind", func() { - - // mock a backupTool - backupTool := createStatefulKindBackupTool() - - testLogfileBackupWithStatefulSet := func() { - By("init test resources") - // mock a cluster - cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, - "test-cd", "test-cv").Create(&testCtx).GetObject() - // mock a backupPolicy - backupPolicy := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). - SetOwnerReferences("apps.kubeblocks.io/v1alpha1", "Cluster", cluster). - AddLogfilePolicy(). - SetTTL("7d"). - SetSchedule("*/1 * * * *", false). 
- SetBackupToolName(backupTool.Name). - SetPVC(backupRemotePVCName). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - Create(&testCtx).GetObject() - - By("enable logfile schedule, expect for backup and statefulSet creation") - Expect(testapps.ChangeObj(&testCtx, backupPolicy, func(policy *dpv1alpha1.BackupPolicy) { - backupPolicy.Spec.Schedule.Logfile.Enable = true - })).Should(Succeed()) - backup := &dpv1alpha1.Backup{} - sts := &appsv1.StatefulSet{} - backupName := getCreatedCRNameByBackupPolicy(backupPolicy, dpv1alpha1.BackupTypeLogFile) - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpBackup *dpv1alpha1.Backup) { - backup = tmpBackup - g.Expect(tmpBackup.Status.Phase).Should(Equal(dpv1alpha1.BackupRunning)) - })).Should(Succeed()) - Eventually(testapps.CheckObjExists(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, sts, true)).Should(Succeed()) - - By("check the container envs which is injected successfully.") - expectedEnv := map[string]string{ - constant.DPArchiveInterval: "60s", - constant.DPTTL: "7d", - constant.DPLogfileTTL: "192h", - constant.DPLogfileTTLSecond: "691200", - } - checkGenerateENV := func(sts *appsv1.StatefulSet) { - mainContainer := sts.Spec.Template.Spec.Containers[0] - for k, v := range expectedEnv { - for _, env := range mainContainer.Env { - if env.Name != k { - continue - } - Expect(env.Value).Should(Equal(v)) - break - } - } - } - checkGenerateENV(sts) - - By("update cronExpression, expect for noticing backup to reconcile") - Expect(testapps.ChangeObj(&testCtx, backupPolicy, func(policy *dpv1alpha1.BackupPolicy) { - backupPolicy.Spec.Schedule.Logfile.CronExpression = "*/2 * * * *" - ttl := "2h" - backupPolicy.Spec.Retention.TTL = &ttl - })).Should(Succeed()) - // waiting for sts has changed and expect sts env to change to the corresponding value - expectedEnv = map[string]string{ 
- constant.DPArchiveInterval: "120s", - constant.DPTTL: "2h", - constant.DPLogfileTTL: "26h", - constant.DPLogfileTTLSecond: "93600", - } - oldStsGeneration := sts.Generation - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpSts *appsv1.StatefulSet) { - g.Expect(tmpSts.Generation).Should(Equal(oldStsGeneration + 1)) - checkGenerateENV(tmpSts) - })).Should(Succeed()) - - By("expect to recreate the backup after delete the backup during enable logfile") - Expect(testapps.ChangeObj(&testCtx, backup, func(policy *dpv1alpha1.Backup) { - backup.Finalizers = []string{} - })).Should(Succeed()) - testapps.DeleteObject(&testCtx, client.ObjectKeyFromObject(backup), backup) - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpBackup *dpv1alpha1.Backup) { - g.Expect(tmpBackup.Generation).Should(Equal(int64(1))) - g.Expect(tmpBackup.Status.Phase).Should(Equal(dpv1alpha1.BackupRunning)) - })).Should(Succeed()) - - By("disable logfile, expect the backup phase to Completed and sts is deleted") - Expect(testapps.ChangeObj(&testCtx, backupPolicy, func(policy *dpv1alpha1.BackupPolicy) { - backupPolicy.Spec.Schedule.Logfile.Enable = false - })).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpBackup *dpv1alpha1.Backup) { - g.Expect(tmpBackup.Status.Phase).Should(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - Eventually(testapps.CheckObjExists(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, sts, false)).Should(Succeed()) - - By("enable logfile schedule, expect to re-create backup ") - Expect(testapps.ChangeObj(&testCtx, backupPolicy, func(policy *dpv1alpha1.BackupPolicy) { - backupPolicy.Spec.Schedule.Logfile.Enable = true - 
})).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpBackup *dpv1alpha1.Backup) { - g.Expect(tmpBackup.Status.Phase).Should(Equal(dpv1alpha1.BackupRunning)) - })).Should(Succeed()) - - By("delete cluster, expect the backup phase to Completed") - testapps.DeleteObject(&testCtx, types.NamespacedName{ - Name: clusterName, - Namespace: testCtx.DefaultNamespace, - }, &appsv1alpha1.Cluster{}) - Eventually(testapps.CheckObj(&testCtx, types.NamespacedName{ - Name: backupName, - Namespace: testCtx.DefaultNamespace, - }, func(g Gomega, tmpBackup *dpv1alpha1.Backup) { - g.Expect(tmpBackup.Status.Phase).Should(Equal(dpv1alpha1.BackupCompleted)) - })).Should(Succeed()) - - // disabled logfile - Expect(testapps.ChangeObj(&testCtx, backupPolicy, func(policy *dpv1alpha1.BackupPolicy) { - backupPolicy.Spec.Schedule.Logfile.Enable = false - })).Should(Succeed()) - } - - testLogfileBackupWithStatefulSet() - - // clear backupPolicy - testapps.ClearResources(&testCtx, intctrlutil.BackupPolicySignature, client.InNamespace(testCtx.DefaultNamespace), - client.HasLabels{testCtx.TestObjLabelKey}) + AfterEach(func() { + cleanEnv() + }) - // test again for create a cluster with same name - testLogfileBackupWithStatefulSet() + Context("create a backup policy", func() { + It("backup policy should be available", func() { + By("creating actionSet used by backup policy") + as := testdp.NewFakeActionSet(&testCtx) + Expect(as).ShouldNot(BeNil()) - }) + By("creating backupPolicy and its status should be available") + bp := testdp.NewFakeBackupPolicy(&testCtx, nil) + Expect(bp).ShouldNot(BeNil()) }) }) }) - -func patchBackupStatus(status dpv1alpha1.BackupStatus, key types.NamespacedName) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(fetched *dpv1alpha1.Backup) { - fetched.Status = status - })).Should(Succeed()) -} - -func patchCronJobStatus(key types.NamespacedName) { - 
now := metav1.Now() - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(fetched *batchv1.CronJob) { - fetched.Status = batchv1.CronJobStatus{LastSuccessfulTime: &now, LastScheduleTime: &now} - })).Should(Succeed()) -} - -func createStatefulKindBackupTool() *dpv1alpha1.BackupTool { - By("By creating a backupTool") - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/pitr_backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) - Expect(testapps.ChangeObj(&testCtx, backupTool, func(bt *dpv1alpha1.BackupTool) { - bt.Spec.DeployKind = dpv1alpha1.DeployKindStatefulSet - })).Should(Succeed()) - return backupTool -} diff --git a/controllers/dataprotection/backuprepo_controller.go b/controllers/dataprotection/backuprepo_controller.go index 6a2241e395a..69b547453cb 100644 --- a/controllers/dataprotection/backuprepo_controller.go +++ b/controllers/dataprotection/backuprepo_controller.go @@ -54,6 +54,7 @@ import ( storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" "github.com/apecloud/kubeblocks/internal/generics" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -108,7 +109,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // handle finalizer - res, err := intctrlutil.HandleCRDeletion(reqCtx, r, repo, dataProtectionFinalizerName, func() (*ctrl.Result, error) { + res, err := intctrlutil.HandleCRDeletion(reqCtx, r, repo, dptypes.DataProtectionFinalizerName, func() (*ctrl.Result, error) { return nil, r.deleteExternalResources(reqCtx, repo) }) if res != nil { @@ -202,7 +203,7 @@ func (r *BackupRepoReconciler) updateStatus(reqCtx intctrlutil.RequestCtx, repo } repo.Status.Phase = phase } - repo.Status.IsDefault = repo.Annotations[constant.DefaultBackupRepoAnnotationKey] == trueVal + 
repo.Status.IsDefault = repo.Annotations[dptypes.DefaultBackupRepoAnnotationKey] == trueVal // update other fields if repo.Status.BackupPVCName == "" { @@ -566,7 +567,7 @@ func (r *BackupRepoReconciler) listAssociatedBackups( var filtered []*dpv1alpha1.Backup for idx := range backupList.Items { backup := &backupList.Items[idx] - if backup.Status.Phase == dpv1alpha1.BackupFailed { + if backup.Status.Phase == dpv1alpha1.BackupPhaseFailed { continue } filtered = append(filtered, backup) @@ -873,7 +874,7 @@ func (r *BackupRepoReconciler) mapBackupToRepo(ctx context.Context, obj client.O return nil } // ignore failed backups - if backup.Status.Phase == dpv1alpha1.BackupFailed { + if backup.Status.Phase == dpv1alpha1.BackupPhaseFailed { return nil } // we should reconcile the BackupRepo when: diff --git a/controllers/dataprotection/backuprepo_controller_test.go b/controllers/dataprotection/backuprepo_controller_test.go index ae6f95c9205..fdee074a326 100644 --- a/controllers/dataprotection/backuprepo_controller_test.go +++ b/controllers/dataprotection/backuprepo_controller_test.go @@ -37,6 +37,7 @@ import ( dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -210,7 +211,7 @@ parameters: dataProtectionBackupRepoKey: repoKey.Name, dataProtectionWaitRepoPreparationKey: trueVal, } - obj.Spec.BackupType = dpv1alpha1.BackupTypeSnapshot + obj.Spec.BackupMethod = "test-backup-method" obj.Spec.BackupPolicyName = "default" if mutateFunc != nil { mutateFunc(obj) @@ -222,11 +223,11 @@ parameters: obj := &dpv1alpha1.Backup{} err := testCtx.Cli.Get(testCtx.Ctx, 
client.ObjectKeyFromObject(backup), obj) g.Expect(err).ShouldNot(HaveOccurred()) - if obj.Status.Phase == dpv1alpha1.BackupFailed { + if obj.Status.Phase == dpv1alpha1.BackupPhaseFailed { // the controller will set the status to failed because // essential objects (e.g. backup policy) are missed. // we set the status to completed after that, to avoid conflict. - obj.Status.Phase = dpv1alpha1.BackupCompleted + obj.Status.Phase = dpv1alpha1.BackupPhaseCompleted err = testCtx.Cli.Status().Update(testCtx.Ctx, obj) g.Expect(err).ShouldNot(HaveOccurred()) } else { @@ -880,7 +881,7 @@ new-item=new-value By("making the repo default") Eventually(testapps.GetAndChangeObj(&testCtx, repoKey, func(repo *dpv1alpha1.BackupRepo) { repo.Annotations = map[string]string{ - constant.DefaultBackupRepoAnnotationKey: trueVal, + dptypes.DefaultBackupRepoAnnotationKey: trueVal, } })).Should(Succeed()) By("checking the repo is default") diff --git a/controllers/dataprotection/backupschedule_controller.go b/controllers/dataprotection/backupschedule_controller.go new file mode 100644 index 00000000000..64f48815b9a --- /dev/null +++ b/controllers/dataprotection/backupschedule_controller.go @@ -0,0 +1,244 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + "context" + "reflect" + "strings" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dpbackup "github.com/apecloud/kubeblocks/internal/dataprotection/backup" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +// BackupScheduleReconciler reconciles a BackupSchedule object +type BackupScheduleReconciler struct { + client.Client + Scheme *k8sruntime.Scheme + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backupschedules,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backupschedules/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backupschedules/finalizers,verbs=update + +// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get +// +kubebuilder:rbac:groups=batch,resources=cronjobs/finalizers,verbs=update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the backupschedule closer to the desired state. 
+func (r *BackupScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqCtx := intctrlutil.RequestCtx{ + Ctx: ctx, + Req: req, + Log: log.FromContext(ctx).WithValues("backupSchedule", req.NamespacedName), + Recorder: r.Recorder, + } + + backupSchedule := &dpv1alpha1.BackupSchedule{} + if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, backupSchedule); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + + original := backupSchedule.DeepCopy() + + // handle finalizer + res, err := intctrlutil.HandleCRDeletion(reqCtx, r, backupSchedule, dptypes.DataProtectionFinalizerName, func() (*ctrl.Result, error) { + return nil, r.deleteExternalResources(reqCtx, backupSchedule) + }) + if res != nil { + return *res, err + } + + // try to remove expired or oldest backups, triggered by cronjob controller + // TODO(ldm): another garbage collection controller to remove expired backups + if err = r.removeExpiredBackups(reqCtx); err != nil { + return r.patchStatusFailed(reqCtx, backupSchedule, "RemoveExpiredBackupsFailed", err) + } + + if err = r.handleSchedule(reqCtx, backupSchedule); err != nil { + return r.patchStatusFailed(reqCtx, backupSchedule, "HandleBackupScheduleFailed", err) + } + + return r.patchStatusAvailable(reqCtx, original, backupSchedule) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dpv1alpha1.BackupSchedule{}). + Owns(&batchv1.CronJob{}). + WithOptions(controller.Options{ + MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), + }). 
+ Complete(r) +} + +func (r *BackupScheduleReconciler) deleteExternalResources( + reqCtx intctrlutil.RequestCtx, + backupSchedule *dpv1alpha1.BackupSchedule) error { + // delete cronjob resource + cronJobList := &batchv1.CronJobList{} + if err := r.Client.List(reqCtx.Ctx, cronJobList, + client.InNamespace(backupSchedule.Namespace), + client.MatchingLabels{ + dataProtectionLabelBackupScheduleKey: backupSchedule.Name, + constant.AppManagedByLabelKey: constant.AppName, + }, + ); err != nil { + return err + } + for _, cronjob := range cronJobList.Items { + if err := dputils.RemoveDataProtectionFinalizer(reqCtx.Ctx, r.Client, &cronjob); err != nil { + return err + } + if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &cronjob); err != nil { + // failed delete k8s job, return error info. + return err + } + } + // notice running backup to completed + // TODO(ldm): is it necessary to notice running backup to completed? + backup := &dpv1alpha1.Backup{} + for _, s := range backupSchedule.Spec.Schedules { + backupKey := client.ObjectKey{ + Namespace: backupSchedule.Namespace, + Name: dpbackup.GenerateCRNameByBackupSchedule(backupSchedule, s.BackupMethod), + } + if err := r.Client.Get(reqCtx.Ctx, backupKey, backup); err != nil { + if client.IgnoreNotFound(err) == nil { + continue + } + return err + } + patch := client.MergeFrom(backup.DeepCopy()) + backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted + backup.Status.CompletionTimestamp = &metav1.Time{Time: time.Now().UTC()} + if err := r.Client.Status().Patch(reqCtx.Ctx, backup, patch); err != nil { + return err + } + } + return nil +} + +// patchStatusAvailable patches backup policy status phase to available. 
+func (r *BackupScheduleReconciler) patchStatusAvailable(reqCtx intctrlutil.RequestCtx, + origin, backupSchedule *dpv1alpha1.BackupSchedule) (ctrl.Result, error) { + if !reflect.DeepEqual(origin.Spec, backupSchedule.Spec) { + if err := r.Client.Update(reqCtx.Ctx, backupSchedule); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + } + // update status phase + if backupSchedule.Status.Phase != dpv1alpha1.BackupSchedulePhaseAvailable || + backupSchedule.Status.ObservedGeneration != backupSchedule.Generation { + patch := client.MergeFrom(backupSchedule.DeepCopy()) + backupSchedule.Status.ObservedGeneration = backupSchedule.Generation + backupSchedule.Status.Phase = dpv1alpha1.BackupSchedulePhaseAvailable + backupSchedule.Status.FailureReason = "" + if err := r.Client.Status().Patch(reqCtx.Ctx, backupSchedule, patch); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + } + return intctrlutil.Reconciled() +} + +// patchStatusFailed patches backup policy status phase to failed. 
+func (r *BackupScheduleReconciler) patchStatusFailed(reqCtx intctrlutil.RequestCtx, + backupSchedule *dpv1alpha1.BackupSchedule, + reason string, + err error) (ctrl.Result, error) { + if intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeRequeue) { + return intctrlutil.RequeueAfter(reconcileInterval, reqCtx.Log, "") + } + backupScheduleDeepCopy := backupSchedule.DeepCopy() + backupSchedule.Status.Phase = dpv1alpha1.BackupSchedulePhaseFailed + backupSchedule.Status.FailureReason = err.Error() + if !reflect.DeepEqual(backupSchedule.Status, backupScheduleDeepCopy.Status) { + if patchErr := r.Client.Status().Patch(reqCtx.Ctx, backupSchedule, client.MergeFrom(backupScheduleDeepCopy)); patchErr != nil { + return intctrlutil.RequeueWithError(patchErr, reqCtx.Log, "") + } + } + r.Recorder.Event(backupSchedule, corev1.EventTypeWarning, reason, err.Error()) + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") +} + +func (r *BackupScheduleReconciler) removeExpiredBackups(reqCtx intctrlutil.RequestCtx) error { + backups := dpv1alpha1.BackupList{} + if err := r.Client.List(reqCtx.Ctx, &backups, + client.InNamespace(reqCtx.Req.Namespace)); err != nil { + return err + } + + now := metav1.Now() + for _, item := range backups.Items { + // ignore retained backup. + if strings.EqualFold(item.GetLabels()[constant.BackupProtectionLabelKey], constant.BackupRetain) { + continue + } + + // ignore backup which is not expired. + if item.Status.Expiration == nil || !item.Status.Expiration.Before(&now) { + continue + } + + // delete expired backup. + if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &item); err != nil { + // failed delete backups, return error info. + return err + } + } + return nil +} + +// handleSchedule handles backup schedules for different backup method. 
+func (r *BackupScheduleReconciler) handleSchedule( + reqCtx intctrlutil.RequestCtx, + backupSchedule *dpv1alpha1.BackupSchedule) error { + backupPolicy, err := getBackupPolicyByName(reqCtx, r.Client, backupSchedule.Spec.BackupPolicyName) + if err != nil { + return err + } + scheduler := dpbackup.Scheduler{ + RequestCtx: reqCtx, + BackupSchedule: backupSchedule, + BackupPolicy: backupPolicy, + Client: r.Client, + Scheme: r.Scheme, + } + return scheduler.Schedule() +} diff --git a/controllers/dataprotection/backupschedule_controller_test.go b/controllers/dataprotection/backupschedule_controller_test.go new file mode 100644 index 00000000000..c96f6cae4ba --- /dev/null +++ b/controllers/dataprotection/backupschedule_controller_test.go @@ -0,0 +1,282 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpbackup "github.com/apecloud/kubeblocks/internal/dataprotection/backup" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" + "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" +) + +var _ = Describe("Backup Schedule Controller", func() { + cleanEnv := func() { + // must wait till resources deleted and no longer existed before the testcases start, + // otherwise if later it needs to create some new resource objects with the same name, + // in race conditions, it will find the existence of old objects, resulting failure to + // create the new objects. + By("clean resources") + // delete rest mocked objects + inNS := client.InNamespace(testCtx.DefaultNamespace) + ml := client.HasLabels{testCtx.TestObjLabelKey} + + // namespaced + testapps.ClearResources(&testCtx, generics.ClusterSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.SecretSignature, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupPolicySignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupScheduleSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + + // wait all backup to be deleted, otherwise the controller maybe create + // job to delete the backup between the ClearResources function delete + // the job and get the job list, resulting the ClearResources panic. 
+ Eventually(testapps.List(&testCtx, generics.BackupSignature, inNS)).Should(HaveLen(0)) + + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.CronJobSignature, true, inNS) + + // non-namespaced + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ActionSetSignature, true, ml) + testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupRepoSignature, true, ml) + testapps.ClearResources(&testCtx, generics.StorageProviderSignature, ml) + } + + BeforeEach(func() { + cleanEnv() + _ = testdp.NewFakeCluster(&testCtx) + }) + + AfterEach(cleanEnv) + + When("creating backup schedule with default settings", func() { + var ( + backupPolicy *dpv1alpha1.BackupPolicy + ) + + getCronjobKey := func(backupSchedule *dpv1alpha1.BackupSchedule, + method string) client.ObjectKey { + return client.ObjectKey{ + Name: dpbackup.GenerateCRNameByBackupSchedule(backupSchedule, method), + Namespace: backupPolicy.Namespace, + } + } + + getJobKey := func(backup *dpv1alpha1.Backup) client.ObjectKey { + return client.ObjectKey{ + Name: dpbackup.GenerateBackupJobName(backup, dpbackup.BackupDataJobNamePrefix), + Namespace: backup.Namespace, + } + } + + BeforeEach(func() { + By("creating an actionSet") + actionSet := testdp.NewFakeActionSet(&testCtx) + + By("creating storage provider") + _ = testdp.NewFakeStorageProvider(&testCtx, nil) + + By("creating backup repo") + _, _ = testdp.NewFakeBackupRepo(&testCtx, nil) + + By("By creating a backupPolicy from actionSet " + actionSet.Name) + backupPolicy = testdp.NewFakeBackupPolicy(&testCtx, nil) + }) + + AfterEach(func() { + }) + + Context("creates a backup schedule", func() { + var ( + backupNamePrefix = "schedule-test-backup-" 
+ backupSchedule *dpv1alpha1.BackupSchedule + backupScheduleKey client.ObjectKey + ) + BeforeEach(func() { + By("creating a backupSchedule") + backupSchedule = testdp.NewFakeBackupSchedule(&testCtx, nil) + backupScheduleKey = client.ObjectKeyFromObject(backupSchedule) + }) + + It("should success", func() { + By("checking backupSchedule status, should be available") + Eventually(testapps.CheckObj(&testCtx, backupScheduleKey, func(g Gomega, fetched *dpv1alpha1.BackupSchedule) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupSchedulePhaseAvailable)) + })).Should(Succeed()) + + By("checking cronjob, should not exist because all schedule policies of methods are disabled") + Eventually(testapps.CheckObjExists(&testCtx, getCronjobKey(backupSchedule, testdp.BackupMethodName), + &batchv1.CronJob{}, false)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, getCronjobKey(backupSchedule, testdp.VSBackupMethodName), + &batchv1.CronJob{}, false)).Should(Succeed()) + + By(fmt.Sprintf("enabling %s method schedule", testdp.BackupMethodName)) + testdp.EnableBackupSchedule(&testCtx, backupSchedule, testdp.BackupMethodName) + + By("checking cronjob, should exist one cronjob to create backup") + Eventually(testapps.CheckObj(&testCtx, getCronjobKey(backupSchedule, testdp.BackupMethodName), func(g Gomega, fetched *batchv1.CronJob) { + schedulePolicy := dpbackup.GetSchedulePolicyByMethod(backupSchedule, testdp.BackupMethodName) + g.Expect(boolptr.IsSetToTrue(schedulePolicy.Enabled)).To(BeTrue()) + g.Expect(fetched.Spec.Schedule).To(Equal(schedulePolicy.CronExpression)) + g.Expect(fetched.Spec.StartingDeadlineSeconds).ShouldNot(BeNil()) + g.Expect(*fetched.Spec.StartingDeadlineSeconds).To(Equal(getStartingDeadlineSeconds(backupSchedule))) + })).Should(Succeed()) + }) + + It("delete expired backups", func() { + now := metav1.Now() + backupStatus := dpv1alpha1.BackupStatus{ + Phase: dpv1alpha1.BackupPhaseCompleted, + Expiration: &now, + StartTimestamp: &now, + 
CompletionTimestamp: &now, + } + + autoBackupLabel := map[string]string{ + dataProtectionLabelAutoBackupKey: "true", + dataProtectionLabelBackupPolicyKey: testdp.BackupPolicyName, + dataProtectionLabelBackupMethodKey: testdp.BackupMethodName, + } + + createBackup := func(name string) *dpv1alpha1.Backup { + return testdp.NewBackupFactory(testCtx.DefaultNamespace, name). + WithRandomName().AddLabelsInMap(autoBackupLabel). + SetBackupPolicyName(testdp.BackupPolicyName). + SetBackupMethod(testdp.BackupMethodName). + Create(&testCtx).GetObject() + } + + checkBackupCompleted := func(key client.ObjectKey) { + Eventually(testapps.CheckObj(&testCtx, key, + func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) + })).Should(Succeed()) + } + + By("create an expired backup") + backupExpired := createBackup(backupNamePrefix + "expired") + + By("create 1st backup") + backupOutLimit1 := createBackup(backupNamePrefix + "1") + + By("create 2nd backup") + backupOutLimit2 := createBackup(backupNamePrefix + "2") + + By("waiting expired backup completed") + expiredKey := client.ObjectKeyFromObject(backupExpired) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupExpired), batchv1.JobComplete) + checkBackupCompleted(expiredKey) + + By("mock update expired backup status to expire") + backupStatus.Expiration = &metav1.Time{Time: now.Add(-time.Hour * 24)} + backupStatus.StartTimestamp = backupStatus.Expiration + testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupExpired), backupStatus) + + By("waiting 1st backup completed") + outLimit1Key := client.ObjectKeyFromObject(backupOutLimit1) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupOutLimit1), batchv1.JobComplete) + checkBackupCompleted(outLimit1Key) + + By("mock 1st backup not to expire") + backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} + backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour)} + 
testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupOutLimit1), backupStatus) + + By("waiting 2nd backup completed") + outLimit2Key := client.ObjectKeyFromObject(backupOutLimit2) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupOutLimit2), batchv1.JobComplete) + checkBackupCompleted(outLimit2Key) + + By("mock 2nd backup not to expire") + backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} + backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour * 2)} + testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupOutLimit2), backupStatus) + + By("patch backup schedule to trigger the controller to delete expired backup") + Eventually(testapps.GetAndChangeObj(&testCtx, backupScheduleKey, func(fetched *dpv1alpha1.BackupSchedule) { + fetched.Spec.Schedules[0].RetentionPeriod = "1d" + })).Should(Succeed()) + + By("retain the latest backup") + Eventually(testapps.List(&testCtx, generics.BackupSignature, + client.MatchingLabels(autoBackupLabel), + client.InNamespace(backupPolicy.Namespace))).Should(HaveLen(2)) + }) + }) + + Context("creates a backup schedule with empty schedule", func() { + It("should fail when create a backupSchedule without nil schedule policy", func() { + backupScheduleObj := testdp.NewBackupScheduleFactory(testCtx.DefaultNamespace, testdp.BackupScheduleName). + SetBackupPolicyName(testdp.BackupPolicyName). + SetSchedules(nil). + GetObject() + Expect(testCtx.CheckedCreateObj(testCtx.Ctx, backupScheduleObj)).Should(HaveOccurred()) + }) + + It("should fail when create a backupSchedule without empty schedule policy", func() { + backupScheduleObj := testdp.NewBackupScheduleFactory(testCtx.DefaultNamespace, testdp.BackupScheduleName). + SetBackupPolicyName(testdp.BackupPolicyName). 
+ GetObject() + Expect(testCtx.CheckedCreateObj(testCtx.Ctx, backupScheduleObj)).Should(HaveOccurred()) + }) + }) + + Context("creates a backup schedule with invalid field", func() { + var ( + backupScheduleKey client.ObjectKey + backupSchedule *dpv1alpha1.BackupSchedule + ) + + BeforeEach(func() { + By("creating a backupSchedule") + backupSchedule = testdp.NewFakeBackupSchedule(&testCtx, func(schedule *dpv1alpha1.BackupSchedule) { + schedule.Spec.Schedules[0].CronExpression = "invalid" + }) + backupScheduleKey = client.ObjectKeyFromObject(backupSchedule) + }) + + It("should fail", func() { + Eventually(testapps.CheckObj(&testCtx, backupScheduleKey, func(g Gomega, fetched *dpv1alpha1.BackupSchedule) { + g.Expect(fetched.Status.Phase).NotTo(Equal(dpv1alpha1.BackupSchedulePhaseAvailable)) + })).Should(Succeed()) + }) + }) + }) +}) + +func getStartingDeadlineSeconds(backupSchedule *dpv1alpha1.BackupSchedule) int64 { + if backupSchedule.Spec.StartingDeadlineMinutes == nil { + return 0 + } + return *backupSchedule.Spec.StartingDeadlineMinutes * 60 +} diff --git a/controllers/dataprotection/backuptool_controller.go b/controllers/dataprotection/backuptool_controller.go deleted file mode 100644 index 768b315bebe..00000000000 --- a/controllers/dataprotection/backuptool_controller.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. 
- -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package dataprotection - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper "github.com/apecloud/kubeblocks/internal/viperx" -) - -// BackupToolReconciler reconciles a BackupTool object -type BackupToolReconciler struct { - client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder -} - -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backuptools,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backuptools/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backuptools/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupTool object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile -func (r *BackupToolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - // NOTES: - // setup common request context - reqCtx := intctrlutil.RequestCtx{ - Ctx: ctx, - Req: req, - Log: log.FromContext(ctx).WithValues("backupTool", req.NamespacedName), - Recorder: r.Recorder, - } - - backupTool := &dataprotectionv1alpha1.BackupTool{} - if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, backupTool); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - // handle finalizer - res, err := intctrlutil.HandleCRDeletion(reqCtx, r, backupTool, dataProtectionFinalizerName, func() (*ctrl.Result, error) { - return nil, r.deleteExternalResources(reqCtx, backupTool) - }) - if res != nil { - return *res, err - } - // TODO(user): your logic here - - return ctrl.Result{}, nil -} - -// SetupWithManager sets up the controller with the Manager. -func (r *BackupToolReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&dataprotectionv1alpha1.BackupTool{}). - WithOptions(controller.Options{ - MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), - }). - Complete(r) -} - -func (r *BackupToolReconciler) deleteExternalResources(reqCtx intctrlutil.RequestCtx, backupTool *dataprotectionv1alpha1.BackupTool) error { - // - // delete any external resources associated with the cronJob - // - // Ensure that delete implementation is idempotent and safe to invoke - // multiple times for same object. 
- - return nil -} diff --git a/controllers/dataprotection/cronjob_controller.go b/controllers/dataprotection/cronjob_controller.go deleted file mode 100644 index 7bad6b9f2ab..00000000000 --- a/controllers/dataprotection/cronjob_controller.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package dataprotection - -import ( - "context" - - batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" -) - -// CronJobReconciler reconciles a cronjob object -type CronJobReconciler struct { - client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder -} - -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get -// +kubebuilder:rbac:groups=batch,resources=cronjobs/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which 
aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile -func (r *CronJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - var ( - cronJob = &batchv1.CronJob{} - backupPolicy = &dataprotectionv1alpha1.BackupPolicy{} - err error - ) - - reqCtx := intctrlutil.RequestCtx{ - Ctx: ctx, - Req: req, - Log: log.FromContext(ctx).WithValues("cronJob", req.NamespacedName), - } - - if err = r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, cronJob); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - backupPolicyKey := types.NamespacedName{ - Namespace: cronJob.Annotations["kubeblocks.io/backup-namespace"], - Name: cronJob.Labels[dataProtectionLabelBackupPolicyKey], - } - if err = r.Client.Get(reqCtx.Ctx, backupPolicyKey, backupPolicy); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - patch := client.MergeFrom(backupPolicy.DeepCopy()) - if cronJob.Status.LastScheduleTime != nil { - backupPolicy.Status.LastScheduleTime = cronJob.Status.LastScheduleTime - backupPolicy.Status.LastSuccessfulTime = cronJob.Status.LastSuccessfulTime - if err := r.Client.Status().Patch(ctx, backupPolicy, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - } - return intctrlutil.Reconciled() -} - -// SetupWithManager sets up the controller with the Manager. -func (r *CronJobReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&batchv1.CronJob{}). - Owns(&batchv1.Job{}). - WithEventFilter(predicate.NewPredicateFuncs(intctrlutil.ManagedByKubeBlocksFilterPredicate)). 
- Complete(r) -} diff --git a/controllers/dataprotection/cue/cronjob.cue b/controllers/dataprotection/cue/cronjob.cue deleted file mode 100644 index 830d0fd7d84..00000000000 --- a/controllers/dataprotection/cue/cronjob.cue +++ /dev/null @@ -1,138 +0,0 @@ -//Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -//This file is part of KubeBlocks project -// -//This program is free software: you can redistribute it and/or modify -//it under the terms of the GNU Affero General Public License as published by -//the Free Software Foundation, either version 3 of the License, or -//(at your option) any later version. -// -//This program is distributed in the hope that it will be useful -//but WITHOUT ANY WARRANTY; without even the implied warranty of -//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -//GNU Affero General Public License for more details. -// -//You should have received a copy of the GNU Affero General Public License -//along with this program. If not, see . - -options: { - name: string - backupPolicyName: string - namespace: string - mgrNamespace: string - cluster: string - schedule: string - backupType: string - ttl: string - serviceAccount: string - image: string - tolerations: { - tolerations: [...] 
- affinity: {...} - nodeSelector: {...} - } -} - -cronjob: { - apiVersion: "batch/v1" - kind: "CronJob" - metadata: { - name: options.name - namespace: options.mgrNamespace - annotations: - "kubeblocks.io/backup-namespace": options.namespace - labels: - "app.kubernetes.io/managed-by": "kubeblocks" - } - spec: { - schedule: options.schedule - successfulJobsHistoryLimit: 0 - failedJobsHistoryLimit: 1 - concurrencyPolicy: "Forbid" - jobTemplate: spec: template: spec: { - restartPolicy: "Never" - serviceAccountName: options.serviceAccount - affinity: options.tolerations.affinity - tolerations: options.tolerations.tolerations - nodeSelector: options.tolerations.nodeSelector - containers: [{ - name: "backup-policy" - image: options.image - imagePullPolicy: "IfNotPresent" - command: [ - "sh", - "-c", - ] - args: [ - """ -kubectl create -f - <. - -options: { - backupName: string - containerName: string - namespace: string - image: string - imagePullPolicy: string -} - -container: { - image: options.image - name: options.containerName - imagePullPolicy: options.imagePullPolicy - command: ["sh", "-c"] - args: [ - """ -retryTimes=0 -oldBackupInfo= -trap "echo 'Terminating...' && exit" TERM -while true; do - sleep 3; - if [ ! -f ${BACKUP_INFO_FILE} ]; then - continue - fi - backupInfo=$(cat ${BACKUP_INFO_FILE}) - if [ "${oldBackupInfo}" == "${backupInfo}" ]; then - continue - fi - echo "start to patch backupInfo: ${backupInfo}" - eval kubectl -n \(options.namespace) patch backup \(options.backupName) --subresource=status --type=merge --patch '{\\\"status\\\":${backupInfo}}' - if [ $? -ne 0 ]; then - retryTimes=$(($retryTimes+1)) - else - echo "update backup status successfully" - retryTimes=0 - oldBackupInfo=${backupInfo} - fi - if [ $retryTimes -ge 3 ]; then - echo "ERROR: update backup status failed, 3 attempts have been made!" 
- exit 1 - fi -done -""", - ] -} diff --git a/controllers/dataprotection/restore_controller.go b/controllers/dataprotection/restore_controller.go new file mode 100644 index 00000000000..3e2b0f7d95e --- /dev/null +++ b/controllers/dataprotection/restore_controller.go @@ -0,0 +1,389 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + "context" + "fmt" + "reflect" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dperrors "github.com/apecloud/kubeblocks/internal/dataprotection/errors" + dprestore "github.com/apecloud/kubeblocks/internal/dataprotection/restore" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" +) + +// RestoreReconciler reconciles a Restore object +type 
RestoreReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restores,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restores/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restores/finalizers,verbs=update +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile +func (r *RestoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqCtx := intctrlutil.RequestCtx{ + Ctx: ctx, + Req: req, + Log: log.FromContext(ctx).WithValues("backup", req.NamespacedName), + Recorder: r.Recorder, + } + + // Get restore CR + restore := &dpv1alpha1.Restore{} + if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, restore); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + + // handle finalizer + res, err := intctrlutil.HandleCRDeletion(reqCtx, r, restore, dptypes.DataProtectionFinalizerName, func() (*ctrl.Result, error) { + return nil, r.deleteExternalResources(reqCtx, restore) + }) + if res != nil { + return *res, err + } + + switch restore.Status.Phase { + case "": + return r.newAction(reqCtx, restore) + case dpv1alpha1.RestorePhaseRunning: + return r.inProgressAction(reqCtx, restore) + } + return intctrlutil.Reconciled() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *RestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dpv1alpha1.Restore{}). 
+ Owns(&batchv1.Job{}). + Complete(r) +} + +func (r *RestoreReconciler) deleteExternalResources(reqCtx intctrlutil.RequestCtx, restore *dpv1alpha1.Restore) error { + jobs := &batchv1.JobList{} + if err := r.Client.List(reqCtx.Ctx, jobs, + client.InNamespace(restore.Namespace), + client.MatchingLabels(dprestore.BuildRestoreLabels(restore.Name))); err != nil { + return client.IgnoreNotFound(err) + } + for i := range jobs.Items { + job := &jobs.Items[i] + if controllerutil.ContainsFinalizer(job, dptypes.DataProtectionFinalizerName) { + patch := client.MergeFrom(job.DeepCopy()) + controllerutil.RemoveFinalizer(job, dptypes.DataProtectionFinalizerName) + if err := r.Patch(reqCtx.Ctx, job, patch); err != nil { + return err + } + } + } + return nil +} + +func (r *RestoreReconciler) newAction(reqCtx intctrlutil.RequestCtx, restore *dpv1alpha1.Restore) (ctrl.Result, error) { + oldRestore := restore.DeepCopy() + patch := client.MergeFrom(oldRestore) + // patch metaObject + if restore.Labels == nil { + restore.Labels = map[string]string{} + } + restore.Labels[constant.AppManagedByLabelKey] = constant.AppName + if !reflect.DeepEqual(restore.ObjectMeta, oldRestore.ObjectMeta) { + if err := r.Client.Patch(reqCtx.Ctx, restore, patch); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + return intctrlutil.Reconciled() + } + // patch status + restore.Status.StartTimestamp = &metav1.Time{Time: time.Now()} + restore.Status.Phase = dpv1alpha1.RestorePhaseRunning + r.Recorder.Event(restore, corev1.EventTypeNormal, dprestore.ReasonRestoreStarting, "start to restore") + if err := r.Client.Status().Patch(reqCtx.Ctx, restore, patch); err != nil { + return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") + } + return intctrlutil.Reconciled() +} + +func (r *RestoreReconciler) inProgressAction(reqCtx intctrlutil.RequestCtx, restore *dpv1alpha1.Restore) (ctrl.Result, error) { + restoreMgr := dprestore.NewRestoreManager(restore, r.Recorder, r.Scheme) + 
// handle restore actions + err := r.handleRestoreActions(reqCtx, restoreMgr) + if intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeFatal) { + // set restore phase to failed if the error is fatal. + restoreMgr.Restore.Status.Phase = dpv1alpha1.RestorePhaseFailed + restoreMgr.Restore.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} + restoreMgr.Restore.Status.Duration = dprestore.GetRestoreDuration(restoreMgr.Restore.Status) + r.Recorder.Event(restore, corev1.EventTypeWarning, dprestore.ReasonRestoreFailed, err.Error()) + err = nil + } + // patch restore status if changes occur + if !reflect.DeepEqual(restoreMgr.OriginalRestore.Status, restoreMgr.Restore.Status) { + err = r.Client.Status().Patch(reqCtx.Ctx, restoreMgr.Restore, client.MergeFrom(restoreMgr.OriginalRestore)) + } + if err != nil { + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") + } + return intctrlutil.Reconciled() +} + +func (r *RestoreReconciler) handleRestoreActions(reqCtx intctrlutil.RequestCtx, restoreMgr *dprestore.RestoreManager) error { + // 1. validate if the restore.spec is valid and build restore manager. + if err := r.validateAndBuildMGR(reqCtx, restoreMgr); err != nil { + return err + } + + // 2. handle the prepareData stage. + isCompleted, err := r.prepareData(reqCtx, restoreMgr) + if err != nil { + return err + } + // if prepareData is not completed, return + if !isCompleted { + return nil + } + // 3. handle the postReady stage. 
+ isCompleted, err = r.postReady(reqCtx, restoreMgr) + if err != nil { + return err + } + if isCompleted { + restoreMgr.Restore.Status.Phase = dpv1alpha1.RestorePhaseCompleted + restoreMgr.Restore.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} + restoreMgr.Restore.Status.Duration = dprestore.GetRestoreDuration(restoreMgr.Restore.Status) + r.Recorder.Event(restoreMgr.Restore, corev1.EventTypeNormal, dprestore.ReasonRestoreCompleted, "restore completed.") + } + return nil +} + +// validateAndBuildMGR validates the spec is valid to restore. if ok, build a manager for restoring. +func (r *RestoreReconciler) validateAndBuildMGR(reqCtx intctrlutil.RequestCtx, restoreMgr *dprestore.RestoreManager) (err error) { + defer func() { + if err == nil { + dprestore.SetRestoreValidationCondition(restoreMgr.Restore, dprestore.ReasonValidateSuccessfully, "validate restore spec successfully") + } else if intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeFatal) { + dprestore.SetRestoreValidationCondition(restoreMgr.Restore, dprestore.ReasonValidateFailed, err.Error()) + r.Recorder.Event(restoreMgr.Restore, corev1.EventTypeWarning, dprestore.ReasonValidateFailed, err.Error()) + } + }() + + // get backupActionSet based on the specified backup name. + backupName := restoreMgr.Restore.Spec.Backup.Name + backupSet, err := restoreMgr.GetBackupActionSetByNamespaced(reqCtx, r.Client, backupName, restoreMgr.Restore.Spec.Backup.Namespace) + if err != nil { + return err + } + + // check if the backup is completed exclude continuous backup. 
+ var backupType dpv1alpha1.BackupType + if backupSet.ActionSet != nil { + backupType = backupSet.ActionSet.Spec.BackupType + } else if backupSet.UseVolumeSnapshot { + backupType = dpv1alpha1.BackupTypeFull + } + if backupType != dpv1alpha1.BackupTypeContinuous && backupSet.Backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { + err = intctrlutil.NewFatalError(fmt.Sprintf(`phase of backup "%s" is not completed`, backupName)) + return err + } + + // build backupActionSets of prepareData and postReady stage based on the specified backup's type. + switch backupType { + case dpv1alpha1.BackupTypeFull: + restoreMgr.SetBackupSets(*backupSet) + case dpv1alpha1.BackupTypeIncremental: + err = restoreMgr.BuildIncrementalBackupActionSets(reqCtx, r.Client, *backupSet) + case dpv1alpha1.BackupTypeDifferential: + err = restoreMgr.BuildDifferentialBackupActionSets(reqCtx, r.Client, *backupSet) + case dpv1alpha1.BackupTypeContinuous: + err = intctrlutil.NewErrorf(dperrors.ErrorTypeWaitForExternalHandler, "wait for external handler to do handle the Point-In-Time recovery.") + r.Recorder.Event(restoreMgr.Restore, corev1.EventTypeWarning, string(dperrors.ErrorTypeWaitForExternalHandler), err.Error()) + default: + err = intctrlutil.NewFatalError(fmt.Sprintf("backup type of %s is empty", backupName)) + } + return err +} + +// prepareData handles the prepareData stage of the backups. 
+func (r *RestoreReconciler) prepareData(reqCtx intctrlutil.RequestCtx, restoreMgr *dprestore.RestoreManager) (bool, error) { + if len(restoreMgr.PrepareDataBackupSets) == 0 { + return true, nil + } + prepareDataConfig := restoreMgr.Restore.Spec.PrepareDataConfig + if prepareDataConfig == nil || (prepareDataConfig.RestoreVolumeClaimsTemplate == nil && len(prepareDataConfig.RestoreVolumeClaims) == 0) { + return true, nil + } + if meta.IsStatusConditionTrue(restoreMgr.Restore.Status.Conditions, dprestore.ConditionTypeRestorePreparedData) { + return true, nil + } + var ( + err error + isCompleted bool + ) + defer func() { + r.handleRestoreStageError(restoreMgr.Restore, dpv1alpha1.PrepareData, err) + }() + // set processing prepare data condition + dprestore.SetRestoreStageCondition(restoreMgr.Restore, dpv1alpha1.PrepareData, dprestore.ReasonProcessing, "processing prepareData stage.") + for i, v := range restoreMgr.PrepareDataBackupSets { + isCompleted, err = r.handleBackupActionSet(reqCtx, restoreMgr, v, dpv1alpha1.PrepareData, i) + if err != nil { + return false, err + } + // waiting for restore jobs finished. 
+ if !isCompleted { + return false, nil + } + } + // set prepare data successfully condition + dprestore.SetRestoreStageCondition(restoreMgr.Restore, dpv1alpha1.PrepareData, dprestore.ReasonSucceed, "prepare data successfully") + return true, nil +} + +func (r *RestoreReconciler) postReady(reqCtx intctrlutil.RequestCtx, restoreMgr *dprestore.RestoreManager) (bool, error) { + readyConfig := restoreMgr.Restore.Spec.ReadyConfig + if len(restoreMgr.PostReadyBackupSets) == 0 || readyConfig == nil { + return true, nil + } + if meta.IsStatusConditionTrue(restoreMgr.Restore.Status.Conditions, dprestore.ConditionTypeRestorePostReady) { + return true, nil + } + dprestore.SetRestoreStageCondition(restoreMgr.Restore, dpv1alpha1.PostReady, dprestore.ReasonProcessing, "processing postReady stage") + var ( + err error + isCompleted bool + ) + defer func() { + r.handleRestoreStageError(restoreMgr.Restore, dpv1alpha1.PrepareData, err) + }() + if readyConfig.ReadinessProbe != nil && !meta.IsStatusConditionTrue(restoreMgr.Restore.Status.Conditions, dprestore.ConditionTypeReadinessProbe) { + // TODO: check readiness probe, use a job or exec? + _ = klog.TODO() + } + for _, v := range restoreMgr.PostReadyBackupSets { + // handle postReady actions + for i := range v.ActionSet.Spec.Restore.PostReady { + isCompleted, err = r.handleBackupActionSet(reqCtx, restoreMgr, v, dpv1alpha1.PostReady, i) + if err != nil { + return false, err + } + // waiting for restore jobs finished. 
+ if !isCompleted { + return false, nil + } + } + } + dprestore.SetRestoreStageCondition(restoreMgr.Restore, dpv1alpha1.PostReady, dprestore.ReasonSucceed, "processing postReady stage successfully") + return true, nil +} + +func (r *RestoreReconciler) handleBackupActionSet(reqCtx intctrlutil.RequestCtx, + restoreMgr *dprestore.RestoreManager, + backupSet dprestore.BackupActionSet, + stage dpv1alpha1.RestoreStage, + step int) (bool, error) { + handleFailed := func(restore *dpv1alpha1.Restore, backupName string) error { + errorMsg := fmt.Sprintf(`restore failed for backup "%s", more information can be found in status.actions.%s`, backupName, stage) + dprestore.SetRestoreStageCondition(restore, stage, dprestore.ReasonFailed, errorMsg) + return intctrlutil.NewFatalError(errorMsg) + } + + checkIsCompleted := func(allActionsFinished, existFailedAction bool) (bool, error) { + if !allActionsFinished { + return false, nil + } + if existFailedAction { + return true, handleFailed(restoreMgr.Restore, backupSet.Backup.Name) + } + return true, nil + } + + actionName := fmt.Sprintf("%s-%d", stage, step) + // 1. check if the restore actions are completed from status.actions firstly. + allActionsFinished, existFailedAction := restoreMgr.AnalysisRestoreActionsWithBackup(stage, backupSet.Backup.Name, actionName) + isCompleted, err := checkIsCompleted(allActionsFinished, existFailedAction) + if isCompleted || err != nil { + return isCompleted, err + } + + var jobs []*batchv1.Job + switch stage { + case dpv1alpha1.PrepareData: + if backupSet.UseVolumeSnapshot { + if err = restoreMgr.RestorePVCFromSnapshot(reqCtx, r.Client, backupSet, actionName); err != nil { + return false, nil + } + } + jobs, err = restoreMgr.BuildPrepareDataJobs(reqCtx, r.Client, backupSet, actionName) + case dpv1alpha1.PostReady: + // 2. 
build jobs for postReady action + jobs, err = restoreMgr.BuildPostReadyActionJobs(reqCtx, r.Client, backupSet, backupSet.ActionSet.Spec.Restore.PostReady[step]) + } + if err != nil { + return false, err + } + if len(jobs) == 0 { + return true, nil + } + // 3. create jobs + jobs, err = restoreMgr.CreateJobsIfNotExist(reqCtx, r.Client, jobs) + if err != nil { + return false, err + } + + // 4. check if jobs are finished. + allActionsFinished, existFailedAction = restoreMgr.CheckJobsDone(stage, actionName, backupSet, jobs) + if stage == dpv1alpha1.PrepareData { + // recalculation whether all actions have been completed. + restoreMgr.Recalculation(backupSet.Backup.Name, actionName, &allActionsFinished, &existFailedAction) + } + return checkIsCompleted(allActionsFinished, existFailedAction) +} + +func (r *RestoreReconciler) handleRestoreStageError(restore *dpv1alpha1.Restore, stage dpv1alpha1.RestoreStage, err error) { + if intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeFatal) { + condition := meta.FindStatusCondition(restore.Status.Conditions, dprestore.ConditionTypeRestorePreparedData) + if condition != nil && condition.Reason != dprestore.ReasonFailed { + dprestore.SetRestoreStageCondition(restore, stage, dprestore.ReasonFailed, err.Error()) + } + } +} diff --git a/controllers/dataprotection/restorejob_controller.go b/controllers/dataprotection/restorejob_controller.go deleted file mode 100644 index e38ab27351e..00000000000 --- a/controllers/dataprotection/restorejob_controller.go +++ /dev/null @@ -1,350 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package dataprotection - -import ( - "context" - "fmt" - - appv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "k8s.io/utils/clock" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper "github.com/apecloud/kubeblocks/internal/viperx" -) - -// RestoreJobReconciler reconciles a RestoreJob object -type RestoreJobReconciler struct { - client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - clock clock.RealClock -} - -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restorejobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restorejobs/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=restorejobs/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// TODO(user): Modify the Reconcile function to compare the state specified by -// the RestoreJob object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile -func (r *RestoreJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // NOTES: - // setup common request context - reqCtx := intctrlutil.RequestCtx{ - Ctx: ctx, - Req: req, - Log: log.FromContext(ctx).WithValues("restoreJob", req.NamespacedName), - Recorder: r.Recorder, - } - restoreJob := &dataprotectionv1alpha1.RestoreJob{} - if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, restoreJob); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - reqCtx.Log.Info("in RestoreJob Reconciler: name: " + restoreJob.Name + " phase: " + string(restoreJob.Status.Phase)) - - // handle finalizer - res, err := intctrlutil.HandleCRDeletion(reqCtx, r, restoreJob, dataProtectionFinalizerName, func() (*ctrl.Result, error) { - return nil, r.deleteExternalResources(reqCtx, restoreJob) - }) - if res != nil { - return *res, err - } - - switch restoreJob.Status.Phase { - case "", dataprotectionv1alpha1.RestoreJobNew: - return r.doRestoreNewPhaseAction(reqCtx, restoreJob) - case dataprotectionv1alpha1.RestoreJobInProgressPhy: - return r.doRestoreInProgressPhyAction(reqCtx, restoreJob) - default: - return intctrlutil.Reconciled() - } -} - -// SetupWithManager sets up the controller with the Manager. -func (r *RestoreJobReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&dataprotectionv1alpha1.RestoreJob{}). - WithOptions(controller.Options{ - MaxConcurrentReconciles: viper.GetInt(maxConcurDataProtectionReconKey), - }). 
- Complete(r) -} - -func (r *RestoreJobReconciler) doRestoreNewPhaseAction( - reqCtx intctrlutil.RequestCtx, - restoreJob *dataprotectionv1alpha1.RestoreJob) (ctrl.Result, error) { - - // 1. get stateful service and - // 2. set stateful replicas to 0 - patch := []byte(`{"spec":{"replicas":0}}`) - if err := r.patchTargetCluster(reqCtx, restoreJob, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - // get backup tool - // get backup job - // build a job pod sec - jobPodSpec, err := r.buildPodSpec(reqCtx, restoreJob) - if err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: restoreJob.Namespace, - Name: restoreJob.Name, - Labels: buildRestoreJobLabels(restoreJob.Name), - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: restoreJob.Namespace, - Name: restoreJob.Name}, - Spec: jobPodSpec, - }, - }, - } - reqCtx.Log.Info("create a built-in job from restoreJob", "job", job) - - if err := r.Client.Create(reqCtx.Ctx, job); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - - // update Phase to InProgress - restoreJob.Status.Phase = dataprotectionv1alpha1.RestoreJobInProgressPhy - restoreJob.Status.StartTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} - if err := r.Client.Status().Update(reqCtx.Ctx, restoreJob); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - return intctrlutil.Reconciled() -} - -func (r *RestoreJobReconciler) doRestoreInProgressPhyAction( - reqCtx intctrlutil.RequestCtx, - restoreJob *dataprotectionv1alpha1.RestoreJob) (ctrl.Result, error) { - job, err := r.getBatchV1Job(reqCtx, restoreJob) - if err != nil { - // not found backup job, retry create job - reqCtx.Log.Info(err.Error()) - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - jobStatusConditions := 
job.Status.Conditions - if len(jobStatusConditions) == 0 { - return intctrlutil.RequeueAfter(reconcileInterval, reqCtx.Log, "") - } - - switch jobStatusConditions[0].Type { - case batchv1.JobComplete: - // update Phase to Completed - restoreJob.Status.Phase = dataprotectionv1alpha1.RestoreJobCompleted - restoreJob.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} - // get stateful service and - // set stateful replicas to 1 - patch := []byte(`{"spec":{"replicas":1}}`) - if err := r.patchTargetCluster(reqCtx, restoreJob, patch); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - case batchv1.JobFailed: - restoreJob.Status.Phase = dataprotectionv1alpha1.RestoreJobFailed - restoreJob.Status.FailureReason = job.Status.Conditions[0].Reason - } - if err := r.Client.Status().Update(reqCtx.Ctx, restoreJob); err != nil { - return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") - } - return intctrlutil.Reconciled() -} - -func (r *RestoreJobReconciler) deleteExternalResources(reqCtx intctrlutil.RequestCtx, restoreJob *dataprotectionv1alpha1.RestoreJob) error { - // - // delete any external resources associated with the cronJob - // - // Ensure that delete implementation is idempotent and safe to invoke - // multiple times for same object. - - // delete k8s job. 
- job, err := r.getBatchV1Job(reqCtx, restoreJob) - if err != nil { - // not found backup job, do nothing - reqCtx.Log.Info(err.Error()) - return nil - } - - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, job); err != nil { - return err - } - return nil -} - -func (r *RestoreJobReconciler) getBatchV1Job(reqCtx intctrlutil.RequestCtx, backup *dataprotectionv1alpha1.RestoreJob) (*batchv1.Job, error) { - job := &batchv1.Job{} - jobNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Name, - } - if err := r.Client.Get(reqCtx.Ctx, jobNameSpaceName, job); err != nil { - // not found backup job, do nothing - reqCtx.Log.Info(err.Error()) - return nil, err - } - return job, nil -} - -func (r *RestoreJobReconciler) buildPodSpec(reqCtx intctrlutil.RequestCtx, restoreJob *dataprotectionv1alpha1.RestoreJob) (corev1.PodSpec, error) { - var podSpec corev1.PodSpec - logger := reqCtx.Log - - // get backup job - backup := &dataprotectionv1alpha1.Backup{} - backupNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: restoreJob.Spec.BackupJobName, - } - if err := r.Get(reqCtx.Ctx, backupNameSpaceName, backup); err != nil { - logger.Error(err, "Unable to get backup for restore.", "backup", backupNameSpaceName) - return podSpec, err - } - - // get backup tool - backupTool := &dataprotectionv1alpha1.BackupTool{} - backupToolNameSpaceName := types.NamespacedName{ - Namespace: reqCtx.Req.Namespace, - Name: backup.Status.BackupToolName, - } - if err := r.Client.Get(reqCtx.Ctx, backupToolNameSpaceName, backupTool); err != nil { - logger.Error(err, "Unable to get backupTool for backup.", "BackupTool", backupToolNameSpaceName) - return podSpec, err - } - - if len(backup.Status.PersistentVolumeClaimName) == 0 { - return podSpec, nil - } - - container := corev1.Container{} - container.Name = restoreJob.Name - container.Command = []string{"sh", "-c"} - container.Args = 
backupTool.Spec.Physical.GetPhysicalRestoreCommand() - container.Image = backupTool.Spec.Image - if backupTool.Spec.Resources != nil { - container.Resources = *backupTool.Spec.Resources - } - - container.VolumeMounts = restoreJob.Spec.TargetVolumeMounts - - // add the volumeMounts with backup volume - restoreVolumeName := fmt.Sprintf("restore-%s", backup.Status.PersistentVolumeClaimName) - remoteVolume := corev1.Volume{ - Name: restoreVolumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: backup.Status.PersistentVolumeClaimName, - }, - }, - } - // add remote volumeMounts - remoteVolumeMount := corev1.VolumeMount{} - remoteVolumeMount.Name = restoreVolumeName - remoteVolumeMount.MountPath = "/data" - container.VolumeMounts = append(container.VolumeMounts, remoteVolumeMount) - - allowPrivilegeEscalation := false - runAsUser := int64(0) - container.SecurityContext = &corev1.SecurityContext{ - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - RunAsUser: &runAsUser} - - // build env for restore - envBackupName := corev1.EnvVar{ - Name: "BACKUP_NAME", - Value: backup.Name, - } - - container.Env = []corev1.EnvVar{envBackupName} - // merge env from backup tool. - container.Env = append(container.Env, backupTool.Spec.Env...) - - podSpec.Containers = []corev1.Container{container} - - podSpec.Volumes = restoreJob.Spec.TargetVolumes - - // add remote volumes - podSpec.Volumes = append(podSpec.Volumes, remoteVolume) - - // TODO(dsj): mount readonly remote volumes for restore. 
- // podSpec.Volumes[0].PersistentVolumeClaim.ReadOnly = true - podSpec.RestartPolicy = corev1.RestartPolicyNever - - return podSpec, nil -} - -func (r *RestoreJobReconciler) patchTargetCluster(reqCtx intctrlutil.RequestCtx, restoreJob *dataprotectionv1alpha1.RestoreJob, patch []byte) error { - // get stateful service - clusterTarget := &appv1.StatefulSetList{} - if err := r.Client.List(reqCtx.Ctx, clusterTarget, - client.InNamespace(reqCtx.Req.Namespace), - client.MatchingLabels(restoreJob.Spec.Target.LabelsSelector.MatchLabels)); err != nil { - return err - } - reqCtx.Log.Info("Get cluster target finish", "target", clusterTarget) - clusterItemsLen := len(clusterTarget.Items) - if clusterItemsLen != 1 { - if clusterItemsLen <= 0 { - restoreJob.Status.FailureReason = "Can not find any statefulsets with labelsSelector." - } else { - restoreJob.Status.FailureReason = "Match more than one results, please check the labelsSelector." - } - restoreJob.Status.Phase = dataprotectionv1alpha1.RestoreJobFailed - reqCtx.Log.Info(restoreJob.Status.FailureReason) - if err := r.Client.Status().Update(reqCtx.Ctx, restoreJob); err != nil { - return err - } - return nil - } - // patch stateful set - if err := r.Client.Patch(reqCtx.Ctx, &clusterTarget.Items[0], client.RawPatch(types.StrategicMergePatchType, patch)); err != nil { - return err - } - return nil -} - -func buildRestoreJobLabels(jobName string) map[string]string { - return map[string]string{ - dataProtectionLabelRestoreJobNameKey: jobName, - constant.AppManagedByLabelKey: constant.AppName, - } -} diff --git a/controllers/dataprotection/restorejob_controller_test.go b/controllers/dataprotection/restorejob_controller_test.go deleted file mode 100644 index 733d50d68d3..00000000000 --- a/controllers/dataprotection/restorejob_controller_test.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or 
modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package dataprotection - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" -) - -var _ = Describe("RestoreJob Controller", func() { - const ( - clusterName = "mycluster" - compName = "cluster" - ) - cleanEnv := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - - // delete rest mocked objects - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced - testapps.ClearResources(&testCtx, intctrlutil.StatefulSetSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.PodSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.RestoreJobSignature, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.BackupSignature, true, inNS) - testapps.ClearResources(&testCtx, intctrlutil.BackupPolicySignature, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.JobSignature, true, inNS) - testapps.ClearResources(&testCtx, intctrlutil.CronJobSignature, inNS, ml) - // non-namespaced - testapps.ClearResources(&testCtx, intctrlutil.BackupToolSignature, ml) - testapps.ClearResources(&testCtx, intctrlutil.BackupPolicyTemplateSignature, ml) - } - - BeforeEach(cleanEnv) - - AfterEach(cleanEnv) - - assureRestoreJobObj := func(backup string) *dataprotectionv1alpha1.RestoreJob { - By("By assure an restoreJob obj") - return testapps.NewRestoreJobFactory(testCtx.DefaultNamespace, "restore-job-"). - WithRandomName().SetBackupJobName(backup). - SetTargetSecretName("mycluster-cluster-secret"). - AddTargetVolumePVC("mysql-restore-storage", "datadir-mycluster-0"). - AddTargetVolumeMount(corev1.VolumeMount{Name: "mysql-restore-storage", MountPath: "/var/lib/mysql"}). - Create(&testCtx).GetObject() - } - - assureBackupObj := func(backupPolicy string) *dataprotectionv1alpha1.Backup { - By("By assure an backup obj") - return testapps.NewBackupFactory(testCtx.DefaultNamespace, "backup-job-"). - WithRandomName().SetBackupPolicyName(backupPolicy). - SetBackupType(dataprotectionv1alpha1.BackupTypeDataFile). 
- Create(&testCtx).GetObject() - } - - assureBackupPolicyObj := func(backupTool string) *dataprotectionv1alpha1.BackupPolicy { - By("By assure an backupPolicy obj") - return testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, "backup-policy-"). - WithRandomName(). - AddDataFilePolicy(). - AddMatchLabels(constant.AppInstanceLabelKey, clusterName). - SetSchedule("0 3 * * *", true). - SetTTL("7d"). - SetBackupToolName(backupTool). - SetTargetSecretName("mycluster-cluster-secret"). - SetPVC("backup-host-path-pvc"). - Create(&testCtx).GetObject() - } - - assureBackupToolObj := func(withoutResources ...bool) *dataprotectionv1alpha1.BackupTool { - By("By assure an backupTool obj") - return testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dataprotectionv1alpha1.BackupTool{}, testapps.RandomizedObjName(), - func(bt *dataprotectionv1alpha1.BackupTool) { - nilResources := false - // optional arguments, only use the first one. - if len(withoutResources) > 0 { - nilResources = withoutResources[0] - } - if nilResources { - bt.Spec.Resources = nil - } - }) - } - - assureStatefulSetObj := func() *appsv1.StatefulSet { - By("By assure an stateful obj") - return testapps.NewStatefulSetFactory(testCtx.DefaultNamespace, clusterName, clusterName, compName). - SetReplicas(3). - AddAppInstanceLabel(clusterName). - AddContainer(corev1.Container{Name: "mysql", Image: testapps.ApeCloudMySQLImage}). 
- AddVolumeClaimTemplate(corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{Name: testapps.DataVolumeName}, - Spec: testapps.NewPVC("1Gi"), - }).Create(&testCtx).GetObject() - } - - patchBackupStatus := func(phase dataprotectionv1alpha1.BackupPhase, key types.NamespacedName) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(backup *dataprotectionv1alpha1.Backup) { - backup.Status.Phase = phase - })).Should(Succeed()) - } - - patchK8sJobStatus := func(jobStatus batchv1.JobConditionType, key types.NamespacedName) { - Eventually(testapps.GetAndChangeObjStatus(&testCtx, key, func(job *batchv1.Job) { - found := false - for _, cond := range job.Status.Conditions { - if cond.Type == jobStatus { - found = true - } - } - if !found { - jobCondition := batchv1.JobCondition{Type: jobStatus} - job.Status.Conditions = append(job.Status.Conditions, jobCondition) - } - })).Should(Succeed()) - } - - testRestoreJob := func(withResources ...bool) { - By("By creating a statefulset and pod") - sts := assureStatefulSetObj() - testapps.MockConsensusComponentPods(&testCtx, sts, clusterName, compName) - - By("By creating a backupTool") - backupTool := assureBackupToolObj(withResources...) 
- - By("By creating a backupPolicy from backupTool: " + backupTool.Name) - backupPolicy := assureBackupPolicyObj(backupTool.Name) - - By("By creating a backup from backupPolicy: " + backupPolicy.Name) - backup := assureBackupObj(backupPolicy.Name) - - By("By creating a restoreJob from backup: " + backup.Name) - toCreate := assureRestoreJobObj(backup.Name) - key := types.NamespacedName{ - Name: toCreate.Name, - Namespace: toCreate.Namespace, - } - backupKey := types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace} - Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, fetched *dataprotectionv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dataprotectionv1alpha1.BackupInProgress)) - })).Should(Succeed()) - - patchBackupStatus(dataprotectionv1alpha1.BackupCompleted, backupKey) - - patchK8sJobStatus(batchv1.JobComplete, key) - - result := &dataprotectionv1alpha1.RestoreJob{} - Eventually(func() bool { - Expect(k8sClient.Get(ctx, key, result)).Should(Succeed()) - return result.Status.Phase == dataprotectionv1alpha1.RestoreJobCompleted || - result.Status.Phase == dataprotectionv1alpha1.RestoreJobFailed - }).Should(BeTrue()) - Expect(result.Status.Phase).Should(Equal(dataprotectionv1alpha1.RestoreJobCompleted)) - } - - Context("When creating restoreJob", func() { - It("Should success with no error", func() { - testRestoreJob() - }) - - It("Without backupTool resources should success with no error", func() { - testRestoreJob(true) - }) - }) - -}) diff --git a/controllers/dataprotection/suite_test.go b/controllers/dataprotection/suite_test.go index e060fa5a1f6..c8175186368 100644 --- a/controllers/dataprotection/suite_test.go +++ b/controllers/dataprotection/suite_test.go @@ -30,8 +30,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + vsv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "go.uber.org/zap/zapcore" batchv1 "k8s.io/api/batch/v1" "k8s.io/client-go/kubernetes/scheme" @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/testutil" @@ -103,16 +103,16 @@ var _ = BeforeSuite(func() { scheme := scheme.Scheme - err = snapshotv1.AddToScheme(scheme) + err = vsv1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) - err = snapshotv1beta1.AddToScheme(scheme) + err = vsv1beta1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) err = appsv1alpha1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme) + err = dpv1alpha1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) err = storagev1alpha1.AddToScheme(scheme) @@ -125,13 +125,16 @@ var _ = BeforeSuite(func() { Expect(k8sClient).NotTo(BeNil()) uncachedObjects := []client.Object{ - &dataprotectionv1alpha1.BackupPolicy{}, - &dataprotectionv1alpha1.BackupTool{}, - &dataprotectionv1alpha1.Backup{}, - &dataprotectionv1alpha1.RestoreJob{}, - &snapshotv1.VolumeSnapshot{}, - &snapshotv1beta1.VolumeSnapshot{}, + &dpv1alpha1.ActionSet{}, + &dpv1alpha1.BackupPolicy{}, + &dpv1alpha1.BackupSchedule{}, + &dpv1alpha1.BackupRepo{}, + 
&dpv1alpha1.Backup{}, + &dpv1alpha1.Restore{}, + &vsv1.VolumeSnapshot{}, + &vsv1beta1.VolumeSnapshot{}, &batchv1.Job{}, + &batchv1.CronJob{}, } // run reconcile k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ @@ -147,33 +150,25 @@ var _ = BeforeSuite(func() { Recorder: k8sManager.GetEventRecorderFor("backup-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - Expect(err).ToNot(HaveOccurred()) - - err = (&BackupPolicyReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("backup-policy-controller"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - err = (&BackupToolReconciler{ + err = (&BackupScheduleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("backup-tool-controller"), + Recorder: k8sManager.GetEventRecorderFor("backup-schedule-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&RestoreJobReconciler{ + err = (&BackupPolicyReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("restore-job-controller"), + Recorder: k8sManager.GetEventRecorderFor("backup-policy-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&CronJobReconciler{ + err = (&ActionSetReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("cronjob-controller"), + Recorder: k8sManager.GetEventRecorderFor("actionset-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/controllers/dataprotection/type.go b/controllers/dataprotection/types.go similarity index 75% rename from controllers/dataprotection/type.go rename to controllers/dataprotection/types.go index 6860acd232d..883afd289b3 100644 --- a/controllers/dataprotection/type.go +++ 
b/controllers/dataprotection/types.go @@ -20,13 +20,9 @@ along with this program. If not, see . package dataprotection import ( - "embed" "runtime" "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -35,16 +31,15 @@ const ( ) const ( - // name of our custom finalizer - dataProtectionFinalizerName = "dataprotection.kubeblocks.io/finalizer" // settings keys maxConcurDataProtectionReconKey = "MAXCONCURRENTRECONCILES_DATAPROTECTION" // label keys + dataProtectionLabelBackupScheduleKey = "dataprotection.kubeblocks.io/backup-schedule" dataProtectionLabelBackupPolicyKey = "dataprotection.kubeblocks.io/backup-policy" + dataProtectionLabelBackupMethodKey = "dataprotection.kubeblocks.io/backup-method" dataProtectionLabelBackupTypeKey = "dataprotection.kubeblocks.io/backup-type" dataProtectionLabelAutoBackupKey = "dataprotection.kubeblocks.io/autobackup" - dataProtectionLabelRestoreJobNameKey = "restorejobs.dataprotection.kubeblocks.io/name" dataProtectionBackupTargetPodKey = "dataprotection.kubeblocks.io/target-pod-name" dataProtectionAnnotationCreateByPolicyKey = "dataprotection.kubeblocks.io/created-by-policy" @@ -58,11 +53,6 @@ const ( dataProtectionTemplateValuesMD5AnnotationKey = "dataprotection.kubeblocks.io/template-values-md5" dataProtectionPVCTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/pvc-template-md5" dataProtectionToolConfigTemplateMD5MD5AnnotationKey = "dataprotection.kubeblocks.io/tool-config-template-md5" - - // the key of persistentVolumeTemplate in the configmap. 
- persistentVolumeTemplateKey = "persistentVolume" - - hostNameLabelKey = "kubernetes.io/hostname" ) // condition constants @@ -95,29 +85,8 @@ const ( ReasonUnknownError = "UnknownError" ) -const manifestsUpdaterContainerName = "manifests-updater" - var reconcileInterval = time.Second func init() { viper.SetDefault(maxConcurDataProtectionReconKey, runtime.NumCPU()*2) } - -var ( - //go:embed cue/* - cueTemplates embed.FS -) - -type backupPolicyOptions struct { - Name string `json:"name"` - BackupPolicyName string `json:"backupPolicyName"` - Namespace string `json:"namespace"` - MgrNamespace string `json:"mgrNamespace"` - Cluster string `json:"cluster"` - Schedule string `json:"schedule"` - BackupType string `json:"backupType"` - TTL metav1.Duration `json:"ttl,omitempty"` - ServiceAccount string `json:"serviceAccount"` - Image string `json:"image"` - Tolerations *corev1.PodSpec `json:"tolerations"` -} diff --git a/controllers/dataprotection/utils.go b/controllers/dataprotection/utils.go index 7ac4d23d219..2673180976a 100644 --- a/controllers/dataprotection/utils.go +++ b/controllers/dataprotection/utils.go @@ -22,180 +22,148 @@ package dataprotection import ( "context" "fmt" - "strconv" + "sort" "strings" "sync" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" - viper 
"github.com/apecloud/kubeblocks/internal/viperx" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) var ( errNoDefaultBackupRepo = fmt.Errorf("no default BackupRepo found") ) -// byBackupStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker. -type byBackupStartTime []dataprotectionv1alpha1.Backup - -// Len returns the length of byBackupStartTime, for the sort.Sort -func (o byBackupStartTime) Len() int { return len(o) } - -// Swap the items, for the sort.Sort -func (o byBackupStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] } - -// Less defines how to compare items, for the sort.Sort -func (o byBackupStartTime) Less(i, j int) bool { - if o[i].Status.StartTimestamp == nil && o[j].Status.StartTimestamp != nil { - return false +func getBackupPolicyByName( + reqCtx intctrlutil.RequestCtx, + cli client.Client, + name string) (*dpv1alpha1.BackupPolicy, error) { + backupPolicy := &dpv1alpha1.BackupPolicy{} + key := client.ObjectKey{ + Namespace: reqCtx.Req.Namespace, + Name: name, } - if o[i].Status.StartTimestamp != nil && o[j].Status.StartTimestamp == nil { - return true - } - if o[i].Status.StartTimestamp.Equal(o[j].Status.StartTimestamp) { - return o[i].Name < o[j].Name + if err := cli.Get(reqCtx.Ctx, key, backupPolicy); err != nil { + return nil, err } - return o[i].Status.StartTimestamp.Before(o[j].Status.StartTimestamp) + return backupPolicy, nil } -// getBackupToolByName gets the backupTool by name. -func getBackupToolByName(reqCtx intctrlutil.RequestCtx, cli client.Client, backupName string) (*dataprotectionv1alpha1.BackupTool, error) { - backupTool := &dataprotectionv1alpha1.BackupTool{} - backupToolNameSpaceName := types.NamespacedName{ - Name: backupName, +// getActionSetByName gets the ActionSet by name. 
+func getActionSetByName(reqCtx intctrlutil.RequestCtx, + cli client.Client, name string) (*dpv1alpha1.ActionSet, error) { + if name == "" { + return nil, nil } - if err := cli.Get(reqCtx.Ctx, backupToolNameSpaceName, backupTool); err != nil { - reqCtx.Log.Error(err, "Unable to get backupTool for backup.", "BackupTool", backupToolNameSpaceName) + as := &dpv1alpha1.ActionSet{} + if err := cli.Get(reqCtx.Ctx, client.ObjectKey{Name: name}, as); err != nil { + reqCtx.Log.Error(err, "failed to get ActionSet for backup.", "ActionSet", name) return nil, err } - return backupTool, nil + return as, nil } -// getCreatedCRNameByBackupPolicy gets the CR name which is created by BackupPolicy, such as CronJob/logfile Backup. -func getCreatedCRNameByBackupPolicy(backupPolicy *dataprotectionv1alpha1.BackupPolicy, backupType dataprotectionv1alpha1.BackupType) string { - name := fmt.Sprintf("%s-%s", generateUniqueNameWithBackupPolicy(backupPolicy), backupPolicy.Namespace) - if len(name) > 30 { - name = strings.TrimRight(name[:30], "-") +func getBackupMethodByName(name string, backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupMethod { + for _, m := range backupPolicy.Spec.BackupMethods { + if m.Name == name { + return &m + } } - return fmt.Sprintf("%s-%s", name, string(backupType)) -} - -func getClusterLabelKeys() []string { - return []string{constant.AppInstanceLabelKey, constant.KBAppComponentLabelKey} -} - -func excludeLabelsForWorkload() []string { - return []string{constant.KBAppComponentLabelKey} + return nil } -func buildAutoCreationAnnotations(backupPolicyName string) map[string]string { - return map[string]string{ - dataProtectionAnnotationCreateByPolicyKey: "true", - dataProtectionLabelBackupPolicyKey: backupPolicyName, +// getTargetPods gets the target pods by BackupPolicy. If podName is not empty, +// it will return the pod which name is podName. Otherwise, it will return the +// pods which are selected by BackupPolicy selector and strategy. 
+func getTargetPods(reqCtx intctrlutil.RequestCtx, + cli client.Client, podName string, + backupPolicy *dpv1alpha1.BackupPolicy) ([]*corev1.Pod, error) { + selector := backupPolicy.Spec.Target.PodSelector + if selector == nil || selector.LabelSelector == nil { + return nil, nil } -} -// getBackupDestinationPath gets the destination path to storage backup datas. -func getBackupDestinationPath(backup *dataprotectionv1alpha1.Backup, pathPrefix string) string { - pathPrefix = strings.TrimRight(pathPrefix, "/") - if strings.TrimSpace(pathPrefix) == "" || strings.HasPrefix(pathPrefix, "/") { - return fmt.Sprintf("/%s%s/%s", backup.Namespace, pathPrefix, backup.Name) + labelSelector, err := metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + pods := &corev1.PodList{} + if err = cli.List(reqCtx.Ctx, pods, + client.InNamespace(reqCtx.Req.Namespace), + client.MatchingLabelsSelector{Selector: labelSelector}); err != nil { + return nil, err } - return fmt.Sprintf("/%s/%s/%s", backup.Namespace, pathPrefix, backup.Name) -} -// buildBackupWorkloadsLabels builds the labels for workloads which owned by backup. 
-func buildBackupWorkloadsLabels(backup *dataprotectionv1alpha1.Backup) map[string]string { - labels := backup.Labels - if labels == nil { - labels = map[string]string{} - } else { - for _, v := range excludeLabelsForWorkload() { - delete(labels, v) - } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("failed to find target pods by backup policy %s/%s", + backupPolicy.Namespace, backupPolicy.Name) } - labels[constant.DataProtectionLabelBackupNameKey] = backup.Name - return labels -} -func addTolerations(podSpec *corev1.PodSpec) (err error) { - if cmTolerations := viper.GetString(constant.CfgKeyCtrlrMgrTolerations); cmTolerations != "" { - if err = json.Unmarshal([]byte(cmTolerations), &podSpec.Tolerations); err != nil { - return err + var targetPods []*corev1.Pod + if podName != "" { + for _, pod := range pods.Items { + if pod.Name == podName { + targetPods = append(targetPods, &pod) + break + } } - } - if cmAffinity := viper.GetString(constant.CfgKeyCtrlrMgrAffinity); cmAffinity != "" { - if err = json.Unmarshal([]byte(cmAffinity), &podSpec.Affinity); err != nil { - return err + if len(targetPods) > 0 { + return targetPods, nil } } - if cmNodeSelector := viper.GetString(constant.CfgKeyCtrlrMgrNodeSelector); cmNodeSelector != "" { - if err = json.Unmarshal([]byte(cmNodeSelector), &podSpec.NodeSelector); err != nil { - return err + + strategy := selector.Strategy + sort.Sort(intctrlutil.ByPodName(pods.Items)) + // if pod selection strategy is Any, always return first pod + switch strategy { + case dpv1alpha1.PodSelectionStrategyAny: + if len(pods.Items) > 0 { + targetPods = append(targetPods, &pods.Items[0]) + } + case dpv1alpha1.PodSelectionStrategyAll: + for i := range pods.Items { + targetPods = append(targetPods, &pods.Items[i]) } } - return nil + + return targetPods, nil } -// getIntervalSecondsForLogfile gets the interval seconds for logfile schedule cronExpression. 
-// currently, only the fields of minutes and hours are taken and contain expressions such as '*/'. -// If there is no such field, the default return is 60s. -func getIntervalSecondsForLogfile(backupType dataprotectionv1alpha1.BackupType, cronExpression string) string { - if backupType != dataprotectionv1alpha1.BackupTypeLogFile { - return "" - } - // move time zone field - if strings.HasPrefix(cronExpression, "TZ=") || strings.HasPrefix(cronExpression, "CRON_TZ=") { - i := strings.Index(cronExpression, " ") - cronExpression = strings.TrimSpace(cronExpression[i:]) - } - var interval = "60" - // skip the macro syntax - if strings.HasPrefix(cronExpression, "@") { - return interval + "s" - } - fields := strings.Fields(cronExpression) -loop: - for i, v := range fields { - switch i { - case 0: - if strings.HasPrefix(v, "*/") { - m, _ := strconv.Atoi(strings.ReplaceAll(v, "*/", "")) - interval = strconv.Itoa(m * 60) - break loop - } - case 1: - if strings.HasPrefix(v, "*/") { - m, _ := strconv.Atoi(strings.ReplaceAll(v, "*/", "")) - interval = strconv.Itoa(m * 60 * 60) - break loop - } - default: - break loop - } - } - return interval + "s" +// getCluster gets the cluster and will ignore the error. +func getCluster(ctx context.Context, + cli client.Client, + targetPod *corev1.Pod) *appsv1alpha1.Cluster { + clusterName := targetPod.Labels[constant.AppInstanceLabelKey] + if len(clusterName) == 0 { + return nil + } + cluster := &appsv1alpha1.Cluster{} + if err := cli.Get(ctx, client.ObjectKey{ + Namespace: targetPod.Namespace, + Name: clusterName, + }, cluster); err != nil { + // should not affect the backup status + return nil + } + return cluster } -// filterCreatedByPolicy filters the workloads which are create by backupPolicy. 
-func filterCreatedByPolicy(object client.Object) bool { - labels := object.GetLabels() - _, containsPolicyNameLabel := labels[dataProtectionLabelBackupPolicyKey] - return labels[dataProtectionLabelAutoBackupKey] == "true" && containsPolicyNameLabel +func getClusterLabelKeys() []string { + return []string{constant.AppInstanceLabelKey, constant.KBAppComponentLabelKey} } // sendWarningEventForError sends warning event for backup controller error -func sendWarningEventForError(recorder record.EventRecorder, backup *dataprotectionv1alpha1.Backup, err error) { +func sendWarningEventForError(recorder record.EventRecorder, backup *dpv1alpha1.Backup, err error) { controllerErr := intctrlutil.UnwrapControllerError(err) if controllerErr != nil { recorder.Eventf(backup, corev1.EventTypeWarning, string(controllerErr.Type), err.Error()) @@ -205,77 +173,17 @@ func sendWarningEventForError(recorder record.EventRecorder, backup *dataprotect } } -var configVolumeSnapshotError = []string{ - "Failed to set default snapshot class with error", - "Failed to get snapshot class with error", - "Failed to create snapshot content with error cannot find CSI PersistentVolumeSource for volume", -} - -func isVolumeSnapshotConfigError(snap *snapshotv1.VolumeSnapshot) bool { - if snap.Status == nil || snap.Status.Error == nil || snap.Status.Error.Message == nil { - return false - } - for _, errMsg := range configVolumeSnapshotError { - if strings.Contains(*snap.Status.Error.Message, errMsg) { - return true - } - } - return false -} - -func generateJSON(path string, value string) string { - segments := strings.Split(path, ".") - jsonString := value - for i := len(segments) - 1; i >= 0; i-- { - jsonString = fmt.Sprintf(`{\"%s\":%s}`, segments[i], jsonString) - } - return jsonString -} - -// cropJobName job name cannot exceed 63 characters for label name limit. 
-func cropJobName(jobName string) string { - if len(jobName) > 63 { - return jobName[:63] - } - return jobName -} - -func buildBackupInfoENV(backupDestinationPath string) string { - return backupPathBase + backupDestinationPath + "/backup.info" -} - -func generateUniqueNameWithBackupPolicy(backupPolicy *dataprotectionv1alpha1.BackupPolicy) string { - uniqueName := backupPolicy.Name - if len(backupPolicy.OwnerReferences) > 0 { - uniqueName = fmt.Sprintf("%s-%s", backupPolicy.OwnerReferences[0].UID[:8], backupPolicy.OwnerReferences[0].Name) - } - return uniqueName -} - -func generateUniqueJobName(backup *dataprotectionv1alpha1.Backup, prefix string) string { - return cropJobName(fmt.Sprintf("%s-%s-%s", prefix, backup.UID[:8], backup.Name)) -} - -func buildDeleteBackupFilesJobNamespacedName(backup *dataprotectionv1alpha1.Backup) types.NamespacedName { - jobName := fmt.Sprintf("%s-%s%s", backup.UID[:8], deleteBackupFilesJobNamePrefix, backup.Name) - if len(jobName) > 63 { - jobName = jobName[:63] - } - return types.NamespacedName{Namespace: backup.Namespace, Name: jobName} -} - -func getDefaultBackupRepo(ctx context.Context, cli client.Client) (*dataprotectionv1alpha1.BackupRepo, error) { - backupRepoList := &dataprotectionv1alpha1.BackupRepoList{} - err := cli.List(ctx, backupRepoList) - if err != nil { +func getDefaultBackupRepo(ctx context.Context, cli client.Client) (*dpv1alpha1.BackupRepo, error) { + backupRepoList := &dpv1alpha1.BackupRepoList{} + if err := cli.List(ctx, backupRepoList); err != nil { return nil, err } - var defaultRepo *dataprotectionv1alpha1.BackupRepo + var defaultRepo *dpv1alpha1.BackupRepo for idx := range backupRepoList.Items { repo := &backupRepoList.Items[idx] // skip non-default repo - if !(repo.Annotations[constant.DefaultBackupRepoAnnotationKey] == trueVal && - repo.Status.Phase == dataprotectionv1alpha1.BackupRepoReady) { + if !(repo.Annotations[dptypes.DefaultBackupRepoAnnotationKey] == trueVal && + repo.Status.Phase == 
dpv1alpha1.BackupRepoReady) { continue } if defaultRepo != nil { @@ -391,12 +299,3 @@ func fromFlattenName(flatten string) (name string, namespace string) { } return } - -func containsJobCondition(job *batchv1.Job, jobCondType batchv1.JobConditionType) bool { - for _, jobCond := range job.Status.Conditions { - if jobCond.Type == jobCondType { - return true - } - } - return false -} diff --git a/controllers/extensions/addon_controller_test.go b/controllers/extensions/addon_controller_test.go index 205453fce0b..361b3dfcd5d 100644 --- a/controllers/extensions/addon_controller_test.go +++ b/controllers/extensions/addon_controller_test.go @@ -40,7 +40,7 @@ import ( extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" "github.com/apecloud/kubeblocks/internal/constant" - intctrlutil "github.com/apecloud/kubeblocks/internal/generics" + "github.com/apecloud/kubeblocks/internal/generics" "github.com/apecloud/kubeblocks/internal/testutil" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -55,21 +55,21 @@ var _ = Describe("Addon controller", func() { By("clean resources") // non-namespaced ml := client.HasLabels{testCtx.TestObjLabelKey} - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.AddonSignature, true, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.AddonSignature, true, ml) inNS := client.InNamespace(viper.GetString(constant.CfgKeyCtrlrMgrNS)) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.JobSignature, true, inNS, + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS, client.HasLabels{ constant.AddonNameLabelKey, }) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.JobSignature, true, inNS, + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS, client.HasLabels{ constant.AppManagedByLabelKey, }) // 
delete rest mocked objects - testapps.ClearResources(&testCtx, intctrlutil.ConfigMapSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.SecretSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.ConfigMapSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.SecretSignature, inNS, ml) // By("deleting the Namespace to perform the tests") // Eventually(func(g Gomega) { diff --git a/deploy/apecloud-mysql/dataprotection/backup.sh b/deploy/apecloud-mysql/dataprotection/backup.sh new file mode 100644 index 00000000000..507a214cbab --- /dev/null +++ b/deploy/apecloud-mysql/dataprotection/backup.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +if [ -d ${DP_BACKUP_DIR} ]; then + rm -rf ${DP_BACKUP_DIR} +fi +mkdir -p ${DP_BACKUP_DIR} +START_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ') +xtrabackup --compress=zstd --backup --safe-slave-backup --slave-info --stream=xbstream \ + --host=${DP_DB_HOST} --user=${DP_DB_USER} --port=${DP_DB_PORT} --password=${DP_DB_PASSWORD} --datadir=${DATA_DIR} >${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream +STOP_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ') +TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR} | awk '{print $1}') +echo "{\"totalSize\":\"$TOTAL_SIZE\",\"timeRange\":{\"start\":\"${START_TIME}\",\"end\":\"${STOP_TIME}\"}}" >${DP_BACKUP_DIR}/backup.info diff --git a/deploy/apecloud-mysql/dataprotection/pitr-backup.sh b/deploy/apecloud-mysql/dataprotection/pitr-backup.sh deleted file mode 100644 index c4694b4d0ca..00000000000 --- a/deploy/apecloud-mysql/dataprotection/pitr-backup.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash - -# export wal-g environments -backup_binlog_dir=${BACKUP_DIR}/${DP_TARGET_POD_NAME} -export WALG_MYSQL_DATASOURCE_NAME="${DB_USER}:${DB_PASSWORD}@tcp(${DB_HOST}:${DP_DB_PORT})/mysql" -export WALG_COMPRESSION_METHOD=zstd -export WALG_FILE_PREFIX=${backup_binlog_dir} -export WALG_MYSQL_CHECK_GTIDS=true -export MYSQL_PWD=${DB_PASSWORD} - -# get binlog basename -MYSQL_CMD="mysql -u ${DB_USER} -h ${DB_HOST} -N" 
-log_bin_basename=$(${MYSQL_CMD} -e "SHOW VARIABLES LIKE 'log_bin_basename';" | awk -F'\t' '{print $2}') -if [ -z ${log_bin_basename} ]; then - echo "ERROR: pod/${DP_TARGET_POD_NAME} connect failed." - exit 1 -fi -LOG_DIR=$(dirname $log_bin_basename) -LOG_PREFIX=$(basename $log_bin_basename) - -latest_bin_log="" -last_flush_logs_time=$(date +%s) -last_purge_time=$(date +%s) -flush_bin_logs_interval=600 - -if [[ ${FLUSH_BINLOG_INTERVAL_SECONDS} =~ ^[0-9]+$ ]];then - flush_bin_logs_interval=${FLUSH_BINLOG_INTERVAL_SECONDS} -fi - -function log() { - msg=$1 - local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S') - echo "${curr_date} INFO: $msg" -} - -# checks if the mysql process is ok -function check_mysql_process() { - is_ok=false - for ((i=1;i<4;i++));do - role=$(${MYSQL_CMD} -e "select role from information_schema.wesql_cluster_local;" | head -n 1) - if [[ $? -eq 0 && (-z ${DP_TARGET_POD_ROLE} || "${DP_TARGET_POD_ROLE,,}" == "${role,,}") ]]; then - is_ok=true - break - fi - echo "Warning: target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${DP_TARGET_POD_ROLE}, current role: ${role}, retry detection!" - sleep 1 - done - if [[ ${is_ok} == "false" ]];then - echo "ERROR: target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${DP_TARGET_POD_ROLE}, current role: ${role}!" 
- exit 1 - fi -} - -# clean up expired logfiles, interval is 60s -function purge_expired_files() { - local curr_time=$(date +%s) - local diff_time=$((${curr_time}-${last_purge_time})) - if [[ -z ${LOGFILE_TTL_SECOND} || ${diff_time} -lt 60 ]]; then - return - fi - if [[ -d ${backup_binlog_dir}/binlog_005 ]];then - local retention_minute=$((${LOGFILE_TTL_SECOND}/60)) - local fileCount=$(find ${backup_binlog_dir}/binlog_005 -mmin +${retention_minute} -name "*.zst" | wc -l) - find ${backup_binlog_dir}/binlog_005 -mmin +${retention_minute} -name "*.zst" -exec rm -rf {} \; - if [ ${fileCount} -gt 0 ]; then - log "clean up expired binlog file successfully, file count: ${fileCount}" - fi - last_purge_time=${curr_time} - fi -} - -# flush bin logs, interval is 600s by default -function flush_binlogs() { - local curr_time=$(date +%s) - local diff_time=$((${curr_time}-${last_flush_logs_time})) - if [[ ${diff_time} -lt ${flush_bin_logs_interval} ]]; then - return - fi - local LATEST_TRANS=$(mysqlbinlog $(ls -Ft $LOG_DIR/|grep -e '^mysql-bin.*[[:digit:]]$' |head -n 1)|grep 'Xid =' |head -n 1) - # only flush bin logs when Xid exists - if [[ -n "${LATEST_TRANS}" ]]; then - log "flush binary logs" - ${MYSQL_CMD} -e "flush binary logs"; - fi - last_flush_logs_time=${curr_time} -} - -# upload bin logs by wal-g -function upload_bin_logs() { - latest_bin_log=$(ls -Ftr $LOG_DIR/|grep -e "^${LOG_PREFIX}.*[[:digit:]]$"|tail -n 1) - wal-g binlog-push; -} - -function get_binlog_start_time() { - local binlog=$1 - local time=$(mysqlbinlog ${binlog} | grep -m 1 "end_log_pos" | awk '{print $1, $2}'|tr -d '#') - local time=$(date -d "$time" -u '+%Y-%m-%dT%H:%M:%SZ') - echo $time -} - -function save_backup_status() { - local first_bin_log=$(ls -Ftr $LOG_DIR/|grep -e "^${LOG_PREFIX}.*[[:digit:]]$"|head -n 1) - local START_TIME=$(get_binlog_start_time $first_bin_log) - local STOP_TIME=$(get_binlog_start_time $latest_bin_log) - local TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - if [[ -z 
$STOP_TIME ]];then - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info - else - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info - fi -} - -mkdir -p ${backup_binlog_dir} && cd $LOG_DIR -# trap term signal -trap "echo 'Terminating...' && sync && exit 0" TERM -log "start to archive binlog logs" -while true; do - - # check if mysql process is ok - check_mysql_process - - # flush bin logs - flush_binlogs - - # upload bin log - upload_bin_logs - - # save backup status which will be updated to `backup` CR by the sidecar - save_backup_status - - # purge the expired bin logs - purge_expired_files - sleep ${DP_INTERVAL_SECONDS} -done - diff --git a/deploy/apecloud-mysql/dataprotection/pitr-restore.sh b/deploy/apecloud-mysql/dataprotection/pitr-restore.sh deleted file mode 100644 index b5e88437299..00000000000 --- a/deploy/apecloud-mysql/dataprotection/pitr-restore.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -BASE_BACKUP_TIME=${BASE_BACKUP_START_TIME} -if [ -f $DATA_DIR/xtrabackup_info ]; then - BASE_BACKUP_TIME=$(cat $DATA_DIR/xtrabackup_info | grep start_time | awk -F ' = ' '{print $2}'); - BASE_BACKUP_TIME=$(date -d"${BASE_BACKUP_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') -fi -log_index_name="archive_log.index" - -function fetch_pitr_binlogs() { - cd ${BACKUP_DIR} - echo "INFO: fetch binlogs from ${BASE_BACKUP_TIME}" - kb_recovery_timestamp=$(date -d "${KB_RECOVERY_TIME}" +%s) - for file in $(find . 
-newermt "${BASE_BACKUP_TIME}" -type f -exec ls -tr {} + | grep .zst );do - file_path=${file#./} - file_without_zst=${file_path%.*} - dir_path=`dirname ${file_path}` - # mkdir the log directory - mkdir -p ${PITR_DIR}/${dir_path} - zstd -d ${file} -o ${PITR_DIR}/${file_without_zst} - echo "${PITR_RELATIVE_PATH}/${file_without_zst}" >> ${PITR_DIR}/${log_index_name} - # check if the binlog file contains the data for recovery time - log_start_time=$(mysqlbinlog ${PITR_DIR}/${file_without_zst} | grep -m 1 "end_log_pos" | awk '{print $1, $2}'|tr -d '#') - log_start_timestamp=$(date -d "${log_start_time}" +%s) - if [[ ${log_start_timestamp} -gt ${kb_recovery_timestamp} ]];then - break - fi - done -} - -function save_to_restore_file() { - if [ -f ${DATA_DIR}/.xtrabackup_restore_new_cluster ];then - restore_signal_file=${DATA_DIR}/.xtrabackup_restore_new_cluster - else - restore_signal_file=${DATA_DIR}/.restore_new_cluster - fi - echo "archive_log_index=${PITR_RELATIVE_PATH}/${log_index_name}" > ${restore_signal_file} - kb_recover_time=$(date -d "${KB_RECOVERY_TIME}" -u '+%Y-%m-%d %H:%M:%S') - echo "recovery_target_datetime=${kb_recover_time}" >> ${restore_signal_file} - sync -} - -fetch_pitr_binlogs - -if [ -f ${PITR_DIR}/${log_index_name} ];then - save_to_restore_file - echo "INFO: fetch binlog finished." -else - echo "INFO: didn't get any binlogs." 
-fi diff --git a/deploy/apecloud-mysql/dataprotection/restore.sh b/deploy/apecloud-mysql/dataprotection/restore.sh new file mode 100644 index 00000000000..89f6b689d74 --- /dev/null +++ b/deploy/apecloud-mysql/dataprotection/restore.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +mkdir -p ${DATA_DIR} +TMP_DIR=${DATA_MOUNT_DIR}/temp +mkdir -p ${TMP_DIR} && cd ${TMP_DIR} +xbstream -x <${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream +xtrabackup --decompress --remove-original --target-dir=${TMP_DIR} +xtrabackup --prepare --target-dir=${TMP_DIR} +xtrabackup --move-back --target-dir=${TMP_DIR} --datadir=${DATA_DIR}/ --log-bin=${LOG_BIN} +touch ${DATA_DIR}/${SIGNAL_FILE} +rm -rf ${TMP_DIR} +chmod -R 0777 ${DATA_DIR} diff --git a/deploy/apecloud-mysql/templates/actionset.yaml b/deploy/apecloud-mysql/templates/actionset.yaml new file mode 100644 index 00000000000..5d728080701 --- /dev/null +++ b/deploy/apecloud-mysql/templates/actionset.yaml @@ -0,0 +1,65 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: xtrabackup-for-apecloud-mysql + labels: + clusterdefinition.kubeblocks.io/name: apecloud-mysql +spec: + backupType: Full + env: + - name: DATA_DIR + value: {{ .Values.mysqlConfigs.dataDir }} + - name: LOG_BIN + value: {{ .Values.mysqlConfigs.logBin }} + - name: DP_DB_PORT + value: "3306" + - name: DATA_MOUNT_DIR + value: {{ .Values.mysqlConfigs.dataMountPath }} + - name: SIGNAL_FILE + value: .xtrabackup_restore_new_cluster + backup: + preBackup: [] + postBackup: [] + backupData: + image: registry.cn-hangzhou.aliyuncs.com/apecloud/apecloud-xtrabackup:latest + runOnTargetPodNode: true + command: + - sh + - -c + - | + {{- .Files.Get "dataprotection/backup.sh" | nindent 8 }} + syncProgress: + enabled: true + intervalSeconds: 5 + restore: + prepareData: + image: registry.cn-hangzhou.aliyuncs.com/apecloud/apecloud-xtrabackup:latest + command: + - sh + - -c + - | + {{- .Files.Get "dataprotection/restore.sh" | nindent 8 }} + postReady: [] +--- 
+apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: volumesnapshot-for-apecloud-mysql + labels: + clusterdefinition.kubeblocks.io/name: apecloud-mysql +spec: + backupType: Full + env: + - name: DATA_DIR + value: /data/mysql/data + - name: SIGNAL_FILE + value: .restore_new_cluster + backup: {} + restore: + prepareData: + image: registry.cn-hangzhou.aliyuncs.com/apecloud/apecloud-xtrabackup:latest + command: + - sh + - -c + - touch {{ .Values.mysqlConfigs.dataDir }}/${SIGNAL_FILE}; sync + postReady: [] \ No newline at end of file diff --git a/deploy/apecloud-mysql/templates/backuppolicytemplate.yaml b/deploy/apecloud-mysql/templates/backuppolicytemplate.yaml index 06f3d467ac9..462d9b11065 100644 --- a/deploy/apecloud-mysql/templates/backuppolicytemplate.yaml +++ b/deploy/apecloud-mysql/templates/backuppolicytemplate.yaml @@ -11,39 +11,30 @@ spec: clusterDefinitionRef: apecloud-mysql backupPolicies: - componentDefRef: mysql - retention: - ttl: 7d - schedule: - startingDeadlineMinutes: 120 - snapshot: - enable: false - cronExpression: "0 18 * * *" - datafile: - enable: false - cronExpression: "0 18 * * *" - logfile: - enable: false - cronExpression: "*/5 * * * *" - snapshot: - hooks: - containerName: mysql - preCommands: - - touch {{ .Values.mysqlConfigs.dataDir }}/.restore_new_cluster; sync - postCommands: - - "rm -f {{ .Values.mysqlConfigs.dataDir }}/.restore_new_cluster; sync" - target: - role: leader - datafile: - backupToolName: xtrabackup-for-apecloud-mysql - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true - target: - role: follower - logfile: - backupToolName: apecloud-mysql-pitr-tool - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true - target: - role: follower + retentionPeriod: 7d + target: + role: follower + backupMethods: + - name: xtrabackup + snapshotVolumes: false + actionSetName: xtrabackup-for-apecloud-mysql + targetVolumes: + volumeMounts: + - name: data + 
mountPath: {{ .Values.mysqlConfigs.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + actionSetName: volumesnapshot-for-apecloud-mysql + targetVolumes: + volumes: + - data + volumeMounts: + - name: data + mountPath: {{ .Values.mysqlConfigs.dataMountPath }} + schedules: + - backupMethod: xtrabackup + enabled: false + cronExpression: "0 18 * * *" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * *" \ No newline at end of file diff --git a/deploy/apecloud-mysql/templates/backuppolicytemplateforhscale.yaml b/deploy/apecloud-mysql/templates/backuppolicytemplateforhscale.yaml index e8c96578d64..cfb63b15e60 100644 --- a/deploy/apecloud-mysql/templates/backuppolicytemplateforhscale.yaml +++ b/deploy/apecloud-mysql/templates/backuppolicytemplateforhscale.yaml @@ -10,14 +10,28 @@ spec: identifier: hscale backupPolicies: - componentDefRef: mysql - snapshot: - hooks: - containerName: mysql - preCommands: - - "touch {{ .Values.mysqlConfigs.dataDir }}/.restore; sync" - postCommands: - - "rm -f {{ .Values.mysqlConfigs.dataDir }}/.restore; sync" - target: - role: leader - datafile: - backupToolName: xtrabackup-for-apecloud-mysql-for-hscale + target: + role: follower + backupMethods: + - name: volume-snapshot + snapshotVolumes: true + actionSetName: volumesnapshot-for-apecloud-mysql + targetVolumes: + volumes: + - data + volumeMounts: + - name: data + mountPath: {{ .Values.mysqlConfigs.dataMountPath }} + env: + - name: SIGNAL_FILE + value: .restore + - name: xtrabackup + snapshotVolumes: false + actionSetName: xtrabackup-for-apecloud-mysql + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.mysqlConfigs.dataMountPath }} + env: + - name: SIGNAL_FILE + value: .xtrabackup_restore \ No newline at end of file diff --git a/deploy/apecloud-mysql/templates/backuptool-pitr.yaml b/deploy/apecloud-mysql/templates/backuptool-pitr.yaml deleted file mode 100644 index e50fc0d5112..00000000000 --- 
a/deploy/apecloud-mysql/templates/backuptool-pitr.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - labels: - clusterdefinition.kubeblocks.io/name: apecloud-mysql - kubeblocks.io/backup-tool-type: pitr - {{- include "apecloud-mysql.labels" . | nindent 4 }} - name: apecloud-mysql-pitr-tool -spec: - deployKind: statefulSet - env: - - name: VOLUME_DATA_DIR - value: {{ .Values.mysqlConfigs.dataMountPath }} - - name: DATA_DIR - value: {{ .Values.mysqlConfigs.dataDir }} - - name: PITR_RELATIVE_PATH - value: pitr-logs - - name: PITR_DIR - value: "$(DATA_DIR)/$(PITR_RELATIVE_PATH)" - - name: CONF_DIR - value: "$(VOLUME_DATA_DIR)/conf" - - name: TIME_FORMAT - value: 2006-01-02T15:04:05Z - - name: DP_TARGET_POD_ROLE - # TODO input by backup policy - value: follower - - name: DP_DB_PORT - value: "3306" - - name: DP_INTERVAL_SECONDS - value: "10" - - name: FLUSH_BINLOG_INTERVAL_SECONDS - value: "3600" - image: apecloud/wal-g:mysql-latest - logical: - restoreCommands: - - bash - - -c - - | - #!/bin/bash - set -e; - echo "INFO: waiting for analysis of archive logs to complete." - while true; do - if [ ! -f ${DATA_DIR}/.xtrabackup_restore_new_cluster ] && [ ! -f ${DATA_DIR}/.restore_new_cluster ];then - break - fi - sleep 1 - done - rm -rf ${DATA_DIR}/${PITR_RELATIVE_PATH}; - echo "INFO: remove ${DATA_DIR}/${PITR_RELATIVE_PATH}." 
- physical: - restoreCommands: - - bash - - -c - - | - set -e; - {{- .Files.Get "dataprotection/pitr-restore.sh" | nindent 8 }} - backupCommands: - - bash - - -c - - | - set -e; - {{- .Files.Get "dataprotection/pitr-backup.sh" | nindent 6 }} - type: pitr diff --git a/deploy/apecloud-mysql/templates/backuptool.yaml b/deploy/apecloud-mysql/templates/backuptool.yaml deleted file mode 100644 index 647bafb3207..00000000000 --- a/deploy/apecloud-mysql/templates/backuptool.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: xtrabackup-for-apecloud-mysql - labels: - clusterdefinition.kubeblocks.io/name: apecloud-mysql - {{- include "apecloud-mysql.labels" . | nindent 4 }} -spec: - image: {{ .Values.image.registry | default "docker.io" }}/apecloud/apecloud-xtrabackup:latest - deployKind: job - env: - - name: DATA_DIR - value: {{ .Values.mysqlConfigs.dataDir }} - - name: LOG_BIN - value: {{ .Values.mysqlConfigs.logBin }} - - name: DP_DB_PORT - value: "3306" - - name: DATA_MOUNT_DIR - value: {{ .Values.mysqlConfigs.dataMountPath }} - physical: - restoreCommands: - - sh - - -c - - | - set -e; - mkdir -p ${DATA_DIR} - TMP_DIR=${DATA_MOUNT_DIR}/temp - mkdir -p ${TMP_DIR} && cd ${TMP_DIR} - xbstream -x < ${BACKUP_DIR}/${BACKUP_NAME}.xbstream - xtrabackup --decompress --remove-original --target-dir=${TMP_DIR} - xtrabackup --prepare --target-dir=${TMP_DIR} - xtrabackup --move-back --target-dir=${TMP_DIR} --datadir=${DATA_DIR}/ --log-bin=${LOG_BIN} - touch ${DATA_DIR}/.xtrabackup_restore_new_cluster - rm -rf ${TMP_DIR} - chmod -R 0777 ${DATA_DIR} - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - sh - - -c - - | - set -e; - if [ -d ${BACKUP_DIR} ]; then - rm -rf ${BACKUP_DIR} - fi - mkdir -p ${BACKUP_DIR}; - START_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ') - xtrabackup --compress=zstd --backup --safe-slave-backup --slave-info --stream=xbstream \ - 
--host=${DB_HOST} --user=${DB_USER} --port=${DP_DB_PORT} --password=${DB_PASSWORD} --datadir=${DATA_DIR} > ${BACKUP_DIR}/${BACKUP_NAME}.xbstream - STOP_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ') - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info - incrementalBackupCommands: [] diff --git a/deploy/apecloud-mysql/templates/backuptoolforhscale.yaml b/deploy/apecloud-mysql/templates/backuptoolforhscale.yaml deleted file mode 100644 index 120cd7a7743..00000000000 --- a/deploy/apecloud-mysql/templates/backuptoolforhscale.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: xtrabackup-for-apecloud-mysql-for-hscale - labels: - clusterdefinition.kubeblocks.io/name: apecloud-mysql - {{- include "apecloud-mysql.labels" . | nindent 4 }} -spec: - image: registry.cn-hangzhou.aliyuncs.com/apecloud/apecloud-xtrabackup:latest - deployKind: job - env: - - name: DATA_DIR - value: {{ .Values.mysqlConfigs.dataDir }} - - name: LOG_BIN - value: {{ .Values.mysqlConfigs.logBin }} - - name: DATA_MOUNT_DIR - value: {{ .Values.mysqlConfigs.dataMountPath }} - physical: - restoreCommands: - - sh - - -c - - | - set -e; - mkdir -p ${DATA_DIR} - TMP_DIR=${DATA_MOUNT_DIR}/temp - mkdir -p ${TMP_DIR} && cd ${TMP_DIR} - xbstream -x < ${BACKUP_DIR}/${BACKUP_NAME}.xbstream - xtrabackup --decompress --target-dir=${TMP_DIR} - xtrabackup --prepare --target-dir=${TMP_DIR} - find . 
-name "*.qp"|xargs rm -f - xtrabackup --move-back --target-dir=${TMP_DIR} --datadir=${DATA_DIR}/ --log-bin=${LOG_BIN} - touch ${DATA_DIR}/.xtrabackup_restore - rm -rf ${TMP_DIR} - chmod -R 0777 ${DATA_DIR} - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - sh - - -c - - | - set -e - mkdir -p ${BACKUP_DIR} - xtrabackup --compress --backup --safe-slave-backup --slave-info --stream=xbstream --host=${DB_HOST} --user=${DB_USER} --password=${DB_PASSWORD} --datadir=${DATA_DIR} > ${BACKUP_DIR}/${BACKUP_NAME}.xbstream - incrementalBackupCommands: [] diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index 716b0ea9ce9..57454b4073c 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -566,6 +566,32 @@ rules: - services/status verbs: - get +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets/finalizers + verbs: + - update +- apiGroups: + - dataprotection.kubeblocks.io + resources: + - actionsets/status + verbs: + - get + - patch + - update - apiGroups: - dataprotection.kubeblocks.io resources: @@ -649,7 +675,7 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools + - backupschedules verbs: - create - delete @@ -661,13 +687,13 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools/finalizers + - backupschedules/finalizers verbs: - update - apiGroups: - dataprotection.kubeblocks.io resources: - - backuptools/status + - backupschedules/status verbs: - get - patch @@ -675,7 +701,7 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - create - delete @@ -687,13 +713,13 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/finalizers + - 
restores/finalizers verbs: - update - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get - patch diff --git a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml index 5b25e90ea08..2073e6f7e8e 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -53,6 +53,268 @@ spec: the specified componentDefinition. items: properties: + backupMethods: + description: backupMethods defines the backup methods. + items: + description: BackupMethod defines the backup method. + properties: + actionSetName: + description: actionSetName refers to the ActionSet object + that defines the backup actions. For volume snapshot + backup, the actionSet is not required, the controller + will use the CSI volume snapshotter to create the snapshot. + type: string + env: + description: env specifies the environment variables for + the backup workload. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are + expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings + for the backup workload container. + properties: + resources: + description: 'resources specifies the resource required + by container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It + can only be set for containers." 
+ items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take + snapshots of persistent volumes. if true, the BackupScript + is not required, the controller will use the CSI volume + snapshotter to create the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from + the target should be mounted in the backup workload. 
+ properties: + volumeMounts: + description: volumeMounts specifies the mount for + the volumes specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes indicates the list of volumes + of targeted application that should be mounted on + the backup job. + items: + type: string + type: array + type: object + required: + - name + type: object + type: array componentDefRef: description: componentDefRef references componentDef defined in ClusterDefinition spec. Need to comply with IANA Service @@ -60,378 +322,86 @@ spec: maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ type: string - datafile: - description: the policy for datafile backup. 
+ retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" + type: string + schedules: + description: schedule policy for backup. + items: + properties: + backupMethod: + description: backupMethod specifies the backup method + name that is defined in backupPolicy. + type: string + cronExpression: + description: the cron expression for schedule, the timezone + is in UTC. see https://en.wikipedia.org/wiki/Cron. + type: string + enabled: + description: enabled specifies whether the backup schedule + is enabled or not. + type: boolean + required: + - backupMethod + - cronExpression + type: object + type: array + target: + description: target instance for backup. properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. The script must exist in - the container of ContainerName and the output format - must be set to JSON. Note that outputting to stderr - may cause the result format to not be in JSON. 
- type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, - only support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + account: + description: refer to spec.componentDef.systemAccounts.accounts[*].name + in ClusterDefinition. the secret created by this account + will be used to connect the database. if not set, the + secret created by spec.ConnectionCredential of the ClusterDefinition + will be used. it will be transformed to a secret for BackupPolicy's + target secret. type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 0 means NO limit on - the number of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. + connectionCredentialKey: + description: connectionCredentialKey defines connection + credential key in secret which created by spec.ConnectionCredential + of the ClusterDefinition. it will be ignored when "account" + is set. properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. 
+ hostKey: + description: hostKey specifies the map key of the host + in the connection credential secret. type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. - properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' + passwordKey: + description: the key of password in the ConnectionCredential + secret. if not set, the default key is "password". type: string - type: object - type: object - logfile: - description: the policy for logfile backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. 
The script must exist in - the container of ContainerName and the output format - must be set to JSON. Note that outputting to stderr - may cause the result format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, - only support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 0 means NO limit on - the number of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. - properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. + portKey: + default: port + description: portKey specifies the map key of the port + in the connection credential secret. type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. 
- properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' + usernameKey: + description: the key of username in the ConnectionCredential + secret. if not set, the default key is "username". type: string type: object - type: object - retention: - description: retention describe how long the Backup should be - retained. if not set, will be retained forever. - properties: - ttl: - description: ttl is a time string ending with the 'd'|'D'|'h'|'H' - character to describe how long the Backup should be retained. - if not set, will be retained forever. - pattern: ^\d+[d|D|h|H]$ + role: + description: 'select instance of corresponding role for + backup, role are: - the name of Leader/Follower/Leaner + for Consensus component. - primary or secondary for Replication + component. finally, invalid role of the component will + be ignored. such as if workload type is Replication and + component''s replicas is 1, the secondary role is invalid. + and it also will be ignored when component is Stateful/Stateless. + the role will be transformed to a role LabelSelector for + BackupPolicy''s target attribute.' type: string type: object - schedule: - description: schedule policy for backup. 
- properties: - datafile: - description: schedule policy for datafile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. - type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - logfile: - description: schedule policy for logfile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. - type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - snapshot: - description: schedule policy for snapshot backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. - type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - startingDeadlineMinutes: - description: startingDeadlineMinutes defines the deadline - in minutes for starting the backup job if it misses scheduled - time for any reason. - format: int64 - maximum: 1440 - minimum: 0 - type: integer - type: object - snapshot: - description: the policy for snapshot backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can - execute. - type: string - path: - description: 'specify the json path of backup object - for patch. example: manifests.backupLog -- means - patch the backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect - backup status metadata. The script must exist in - the container of ContainerName and the output format - must be set to JSON. 
Note that outputting to stderr - may cause the result format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: - before backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup - target pod. if true, will use the service account - of the backup target pod. otherwise, will use the - system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. - Value must be non-negative integer. 0 means NO limit on - the number of backups. - format: int32 - type: integer - hooks: - description: execute hook commands for backup. - properties: - containerName: - description: which container can exec command - type: string - image: - description: exec command with image - type: string - postCommands: - description: post backup to perform commands - items: - type: string - type: array - preCommands: - description: pre backup to perform commands - items: - type: string - type: array - type: object - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target instance for backup. - properties: - account: - description: refer to spec.componentDef.systemAccounts.accounts[*].name - in ClusterDefinition. the secret created by this account - will be used to connect the database. if not set, - the secret created by spec.ConnectionCredential of - the ClusterDefinition will be used. it will be transformed - to a secret for BackupPolicy's target secret. - type: string - connectionCredentialKey: - description: connectionCredentialKey defines connection - credential key in secret which created by spec.ConnectionCredential - of the ClusterDefinition. it will be ignored when - "account" is set. 
- properties: - passwordKey: - description: the key of password in the ConnectionCredential - secret. if not set, the default key is "password". - type: string - usernameKey: - description: the key of username in the ConnectionCredential - secret. if not set, the default key is "username". - type: string - type: object - role: - description: 'select instance of corresponding role - for backup, role are: - the name of Leader/Follower/Leaner - for Consensus component. - primary or secondary for - Replication component. finally, invalid role of the - component will be ignored. such as if workload type - is Replication and component''s replicas is 1, the - secondary role is invalid. and it also will be ignored - when component is Stateful/Stateless. the role will - be transformed to a role LabelSelector for BackupPolicy''s - target attribute.' - type: string - type: object - type: object required: + - backupMethods - componentDefRef type: object minItems: 1 diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index f06624d15d3..d741c62fef4 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -118,15 +118,7 @@ spec: description: enabled defines whether to enable automated backup. type: boolean method: - allOf: - - enum: - - snapshot - - backupTool - - enum: - - snapshot - - backupTool - default: snapshot - description: 'backup method, support: snapshot, backupTool.' + description: backup method name to use, that is defined in backupPolicy. type: string pitrEnabled: default: false @@ -138,11 +130,14 @@ spec: will use the default backupRepo. type: string retentionPeriod: - default: 1d - description: retentionPeriod is a time string ending with the - 'd'|'D'|'h'|'H' character to describe how long the Backup should - be retained. if not set, will be retained forever. 
- pattern: ^\d+[d|D|h|H]$ + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" type: string startingDeadlineMinutes: description: startingDeadlineMinutes defines the deadline in minutes @@ -152,8 +147,6 @@ spec: maximum: 1440 minimum: 0 type: integer - required: - - method type: object clusterDefinitionRef: description: Cluster referencing ClusterDefinition name. This is an diff --git a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml index 4c5da53a769..16bf1252b31 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml @@ -63,17 +63,15 @@ spec: backupSpec: description: backupSpec defines how to backup the cluster. properties: + backupMethod: + description: Backup method name that is defined in backupPolicy. + type: string backupName: description: backupName is the name of the backup. type: string backupPolicyName: description: Which backupPolicy is applied to perform this backup type: string - backupType: - default: datafile - description: Backup Type. datafile or logfile or snapshot. If - not set, datafile is the default type. - type: string parentBackupName: description: if backupType is incremental, parentBackupName is required. 
diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_actionsets.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_actionsets.yaml new file mode 100644 index 00000000000..85c9df43f48 --- /dev/null +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_actionsets.yaml @@ -0,0 +1,554 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: actionsets.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + kind: ActionSet + listKind: ActionSetList + plural: actionsets + shortNames: + - as + singular: actionset + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupType + name: BACKUP-TYPE + type: string + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ActionSet is the Schema for the actionsets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ActionSetSpec defines the desired state of ActionSet + properties: + backup: + description: backup specifies the backup action. 
+ properties: + backupData: + description: backupData specifies the backup data action. + properties: + command: + description: command specifies the commands to back up the + volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if it encounters + an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to run the + job workload on the target pod node. If backup container + should mount the target pod's volume, this field should + be set to true. + type: boolean + syncProgress: + description: syncProgress specifies whether to sync the backup + progress and its interval seconds. + properties: + enabled: + description: enabled specifies whether to sync the backup + progress. If enabled, a sidecar container will be created + to sync the backup progress to the Backup CR status. + type: boolean + intervalSeconds: + default: 60 + description: intervalSeconds specifies the interval seconds + to sync the backup progress. + format: int32 + type: integer + type: object + required: + - command + - image + type: object + postBackup: + description: postBackup specifies a hook that should be executed + after the backup. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. 
+ type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. If backup + container should mount the target pod's volume, this + field should be set to true. + type: boolean + required: + - command + - image + type: object + type: object + type: array + preBackup: + description: preBackup specifies a hook that should be executed + before the backup. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. 
+ enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. If backup + container should mount the target pod's volume, this + field should be set to true. + type: boolean + required: + - command + - image + type: object + type: object + type: array + type: object + backupType: + allOf: + - enum: + - Full + - Incremental + - Differential + - Continuous + - enum: + - Full + - Incremental + - Differential + - Continuous + default: Full + description: 'backupType specifies the backup type, supported values: + Full, Continuous. Full means full backup. Incremental means back + up data that have changed since the last backup (full or incremental). + Differential means back up data that have changed since the last + full backup. Continuous will back up the transaction log continuously, + the PITR (Point in Time Recovery). can be performed based on the + continuous backup and full backup.' + type: string + env: + description: List of environment variables to set in the container. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + envFrom: + description: List of sources to populate environment variables in + the container. The keys defined within a source must be a C_IDENTIFIER. + All invalid keys will be reported as an event when the container + is starting. When a key exists in multiple sources, the value associated + with the last source will take precedence. 
Values defined by an + Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + restore: + description: restore specifies the restore action. + properties: + postReady: + description: postReady specifies the action to execute after the + data is ready. + items: + description: ActionSpec defines an action that should be executed. + Only one of the fields may be set. + properties: + exec: + description: exec specifies the action should be executed + by the pod exec API in a container. + properties: + command: + description: Command is the command and arguments to + execute. + items: + type: string + minItems: 1 + type: array + container: + description: container is the container in the pod where + the command should be executed. If not specified, + the pod's first container is used. 
+ type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time + should wait for the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + job: + description: job specifies the action should be executed + by a Kubernetes Job. + properties: + command: + description: command specifies the commands to back + up the volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if + it encounters an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to + run the job workload on the target pod node. If backup + container should mount the target pod's volume, this + field should be set to true. + type: boolean + required: + - command + - image + type: object + type: object + type: array + prepareData: + description: prepareData specifies the action to prepare data. + properties: + command: + description: command specifies the commands to back up the + volume data. + items: + type: string + type: array + image: + description: image specifies the image of backup container. + type: string + onError: + default: Fail + description: OnError specifies how should behave if it encounters + an error executing this action. + enum: + - Continue + - Fail + type: string + runOnTargetPodNode: + default: false + description: runOnTargetPodNode specifies whether to run the + job workload on the target pod node. If backup container + should mount the target pod's volume, this field should + be set to true. 
+ type: boolean + required: + - command + - image + type: object + type: object + required: + - backupType + type: object + status: + description: ActionSetStatus defines the observed state of ActionSet + properties: + message: + description: A human-readable message indicating details about why + the ActionSet is in this phase. + type: string + observedGeneration: + description: generation number + format: int64 + type: integer + phase: + description: phase - in list of [Available,Unavailable] + enum: + - Available + - Unavailable + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml index 660c40806d3..ec61183f863 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -20,20 +20,19 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: + - jsonPath: .spec.backupRepoName + name: BACKUP-REPO + type: string - jsonPath: .status.phase name: STATUS type: string - - jsonPath: .status.lastScheduleTime - name: LAST SCHEDULE - type: string - jsonPath: .metadata.creationTimestamp name: AGE type: date name: v1alpha1 schema: openAPIV3Schema: - description: BackupPolicy is the Schema for the backuppolicies API (defined - by User) + description: BackupPolicy is the Schema for the backuppolicies API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -50,542 +49,389 @@ spec: spec: description: BackupPolicySpec defines the desired state of BackupPolicy properties: - datafile: - description: the policy for datafile backup. - properties: - backupRepoName: - description: refer to BackupRepo and the backup data will be stored - in the corresponding repo. 
- pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, only - support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - persistentVolumeClaim: - description: refer to PersistentVolumeClaim and the backup data - will be stored in the corresponding persistent volume. 
- properties: - createPolicy: - default: IfNotPresent - description: 'createPolicy defines the policy for creating - the PersistentVolumeClaim, enum values: - Never: do nothing - if the PersistentVolumeClaim not exists. - IfNotPresent: - create the PersistentVolumeClaim if not present and the - accessModes only contains ''ReadWriteMany''.' - enum: - - IfNotPresent - - Never - type: string - initCapacity: - anyOf: - - type: integer - - type: string - description: initCapacity represents the init storage size - of the PersistentVolumeClaim which should be created if - not exist. and the default value is 100Gi if it is empty. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - name: - description: the name of PersistentVolumeClaim to store backup - data. - type: string - persistentVolumeConfigMap: - description: 'persistentVolumeConfigMap references the configmap - which contains a persistentVolume template. key must be - "persistentVolume" and value is the "PersistentVolume" struct. - support the following built-in Objects: - $(GENERATE_NAME): - generate a specific format "`PVC NAME`-`PVC NAMESPACE`". - if the PersistentVolumeClaim not exists and CreatePolicy - is "IfNotPresent", the controller will create it by this - template. this is a mutually exclusive setting with "storageClassName".' + backoffLimit: + description: Specifies the number of retries before marking the backup + failed. + format: int32 + maximum: 10 + minimum: 0 + type: integer + backupMethods: + description: backupMethods defines the backup methods. + items: + description: BackupMethod defines the backup method. + properties: + actionSetName: + description: actionSetName refers to the ActionSet object that + defines the backup actions. For volume snapshot backup, the + actionSet is not required, the controller will use the CSI + volume snapshotter to create the snapshot. 
+ type: string + env: + description: env specifies the environment variables for the + backup workload. + items: + description: EnvVar represents an environment variable present + in a Container. properties: name: - description: the name of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + description: Name of the environment variable. Must be + a C_IDENTIFIER. type: string - namespace: - description: the namespace of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' type: string - required: - - name - - namespace - type: object - storageClassName: - description: storageClassName is the name of the StorageClass - required by the claim. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - type: object - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. 
- properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string required: - name type: object - required: - - labelsSelector - type: object - required: - - target - type: object - logfile: - description: the policy for logfile backup. - properties: - backupRepoName: - description: refer to BackupRepo and the backup data will be stored - in the corresponding repo. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupStatusUpdates: - description: define how to update metadata for backup status. - items: + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings for + the backup workload container. properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. 
if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupToolName: - description: which backup tool to perform database backup, only - support one tool. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - persistentVolumeClaim: - description: refer to PersistentVolumeClaim and the backup data - will be stored in the corresponding persistent volume. - properties: - createPolicy: - default: IfNotPresent - description: 'createPolicy defines the policy for creating - the PersistentVolumeClaim, enum values: - Never: do nothing - if the PersistentVolumeClaim not exists. - IfNotPresent: - create the PersistentVolumeClaim if not present and the - accessModes only contains ''ReadWriteMany''.' - enum: - - IfNotPresent - - Never - type: string - initCapacity: - anyOf: - - type: integer - - type: string - description: initCapacity represents the init storage size - of the PersistentVolumeClaim which should be created if - not exist. and the default value is 100Gi if it is empty. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - name: - description: the name of PersistentVolumeClaim to store backup - data. - type: string - persistentVolumeConfigMap: - description: 'persistentVolumeConfigMap references the configmap - which contains a persistentVolume template. key must be - "persistentVolume" and value is the "PersistentVolume" struct. 
- support the following built-in Objects: - $(GENERATE_NAME): - generate a specific format "`PVC NAME`-`PVC NAMESPACE`". - if the PersistentVolumeClaim not exists and CreatePolicy - is "IfNotPresent", the controller will create it by this - template. this is a mutually exclusive setting with "storageClassName".' - properties: - name: - description: the name of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - namespace: - description: the namespace of the persistentVolume ConfigMap. - pattern: ^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$ - type: string - required: - - name - - namespace - type: object - storageClassName: - description: storageClassName is the name of the StorageClass - required by the claim. - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - type: object - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. 
- items: + resources: + description: 'resources specifies the resource required + by container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where + this field is used. It makes that resource available + inside a container. type: string - type: array - required: - - key - - operator + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. 
+ requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take snapshots + of persistent volumes. if true, the BackupScript is not required, + the controller will use the CSI volume snapshotter to create + the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from the + target should be mounted in the backup workload. + properties: + volumeMounts: + description: volumeMounts specifies the mount for the volumes + specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults to + "" (volume's root). 
+ type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. - properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: array + volumes: + description: Volumes indicates the list of volumes of targeted + application that should be mounted on the backup job. + items: type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string - required: - - name - type: object - required: - - labelsSelector - type: object - required: - - target - type: object - retention: - description: retention describe how long the Backup should be retained. - if not set, will be retained forever. - properties: - ttl: - description: ttl is a time string ending with the 'd'|'D'|'h'|'H' - character to describe how long the Backup should be retained. - if not set, will be retained forever. - pattern: ^\d+[d|D|h|H]$ - type: string - type: object - schedule: - description: schedule policy for backup. 
+ type: array + type: object + required: + - name + type: object + type: array + backupRepoName: + description: backupRepoName is the name of BackupRepo and the backup + data will be stored in this repository. If not set, will be stored + in the default backup repository. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + pathPrefix: + description: pathPrefix is the directory inside the backup repository + to store the backup content. It is a relative to the path of the + backup repository. + type: string + target: + description: target specifies the target information to back up. properties: - datafile: - description: schedule policy for datafile backup. + connectionCredential: + description: connectionCredential specifies the connection credential + to connect to the target database cluster. properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + hostKey: + description: hostKey specifies the map key of the host in + the connection credential secret. type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable - type: object - logfile: - description: schedule policy for logfile backup. - properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + passwordKey: + default: password + description: passwordKey specifies the map key of the password + in the connection credential secret. + type: string + portKey: + default: port + description: portKey specifies the map key of the port in + the connection credential secret. + type: string + secretName: + description: secretName refers to the Secret object that contains + the connection credential. + type: string + usernameKey: + default: username + description: usernameKey specifies the map key of the user + in the connection credential secret. 
type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable type: object - snapshot: - description: schedule policy for snapshot backup. + podSelector: + description: podSelector is used to find the target pod. The volumes + of the target pod will be backed up. properties: - cronExpression: - description: the cron expression for schedule, the timezone - is in UTC. see https://en.wikipedia.org/wiki/Cron. + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + strategy: + default: Any + description: 'strategy specifies the strategy to select the + target pod when multiple pods are selected. Valid values + are: - All: select all pods that match the labelsSelector. + - Any: select any one pod that match the labelsSelector.' 
+ enum: + - All + - Any type: string - enable: - description: enable or disable the schedule. - type: boolean - required: - - cronExpression - - enable type: object - startingDeadlineMinutes: - description: startingDeadlineMinutes defines the deadline in minutes - for starting the backup job if it misses scheduled time for - any reason. - format: int64 - maximum: 1440 - minimum: 0 - type: integer - type: object - snapshot: - description: the policy for snapshot backup. - properties: - backupStatusUpdates: - description: define how to update metadata for backup status. - items: - properties: - containerName: - description: which container name that kubectl can execute. - type: string - path: - description: 'specify the json path of backup object for - patch. example: manifests.backupLog -- means patch the - backup json path of status.manifests.backupLog.' - type: string - script: - description: the shell Script commands to collect backup - status metadata. The script must exist in the container - of ContainerName and the output format must be set to - JSON. Note that outputting to stderr may cause the result - format to not be in JSON. - type: string - updateStage: - description: 'when to update the backup status, pre: before - backup, post: after backup' - enum: - - pre - - post - type: string - useTargetPodServiceAccount: - description: useTargetPodServiceAccount defines whether - this job requires the service account of the backup target - pod. if true, will use the service account of the backup - target pod. otherwise, will use the system service account. - type: boolean - required: - - updateStage - type: object - type: array - backupsHistoryLimit: - default: 7 - description: the number of automatic backups to retain. Value - must be non-negative integer. 0 means NO limit on the number - of backups. - format: int32 - type: integer - hooks: - description: execute hook commands for backup. 
+ x-kubernetes-map-type: atomic + resources: + description: resources specifies the kubernetes resources to back + up. properties: - containerName: - description: which container can exec command - type: string - image: - description: exec command with image - type: string - postCommands: - description: post backup to perform commands + excluded: + description: excluded is a slice of namespaced-scoped resource + type names to exclude in the kubernetes resources. The default + value is empty. items: type: string type: array - preCommands: - description: pre backup to perform commands + included: + default: + - '*' + description: included is a slice of namespaced-scoped resource + type names to include in the kubernetes resources. The default + value is "*", which means all resource types will be included. items: type: string type: array - type: object - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: target database cluster for backup. - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + selector: + description: selector is a metav1.LabelSelector to filter + the target kubernetes resources that need to be backed up. + If not set, will do not back up any kubernetes resources. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -630,64 +476,34 @@ spec: type: object type: object x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup - policy template. if still not set, the controller will check - if any system account for dataprotection has been created. 
- properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in - the connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the - connection credential secret - type: string - required: - - name - type: object - required: - - labelsSelector type: object - required: - - target + serviceAccountName: + description: serviceAccountName specifies the service account + to run the backup workload. + type: string type: object + required: + - backupMethods + - target type: object status: description: BackupPolicyStatus defines the observed state of BackupPolicy properties: - failureReason: - description: the reason if backup policy check failed. - type: string - lastScheduleTime: - description: information when was the last time the job was successfully - scheduled. - format: date-time - type: string - lastSuccessfulTime: - description: information when was the last time the job successfully - completed. - format: date-time + message: + description: A human-readable message indicating details about why + the BackupPolicy is in this phase. type: string observedGeneration: description: observedGeneration is the most recent generation observed - for this BackupPolicy. It corresponds to the Cluster's generation, + for this BackupPolicy. It refers to the BackupPolicy's generation, which is updated on mutation by the API Server. format: int64 type: integer phase: - description: 'backup policy phase valid value: Available, Failed.' 
+ description: phase - in list of [Available,Unavailable] enum: - Available - - Failed + - Unavailable type: string type: object type: object diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml index 27fa8f29e72..0126b52ced5 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml @@ -18,15 +18,18 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .spec.backupType - name: TYPE + - jsonPath: .spec.backupPolicyName + name: POLICY + type: string + - jsonPath: .spec.backupMethod + name: METHOD + type: string + - jsonPath: .status.backupRepoName + name: REPO type: string - jsonPath: .status.phase name: STATUS type: string - - jsonPath: .status.sourceCluster - name: SOURCE-CLUSTER - type: string - jsonPath: .status.totalSize name: TOTAL-SIZE type: string @@ -34,15 +37,18 @@ spec: name: DURATION type: string - jsonPath: .metadata.creationTimestamp - name: CREATE-TIME + name: CREATION-TIME type: string - jsonPath: .status.completionTimestamp name: COMPLETION-TIME type: string + - jsonPath: .status.expiration + name: EXPIRATION-TIME + type: string name: v1alpha1 schema: openAPIV3Schema: - description: Backup is the Schema for the backups API (defined by User). + description: Backup is the Schema for the backups API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -59,135 +65,444 @@ spec: spec: description: BackupSpec defines the desired state of Backup. properties: + backupMethod: + description: backupMethod specifies the backup method name that is + defined in backupPolicy. + type: string backupPolicyName: - description: Which backupPolicy is applied to perform this backup + description: Which backupPolicy is applied to perform this backup. 
pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string - backupType: - default: datafile - description: Backup Type. datafile or logfile or snapshot. If not - set, datafile is the default type. - enum: - - datafile - - logfile - - snapshot + deletionPolicy: + allOf: + - enum: + - Delete + - Retain + - enum: + - Delete + - Retain + default: Delete + description: deletionPolicy determines whether the backup contents + stored in backup repository should be deleted when the backup custom + resource is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the backup content and its physical snapshot + on backup repository are kept. "Delete" means that the backup content + and its physical snapshot on backup repository are deleted. type: string parentBackupName: - description: if backupType is incremental, parentBackupName is required. + description: parentBackupName determines the parent backup name for + incremental or differential backup. + type: string + retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which the + backup should be kept. controller will remove all backups that are + older than the RetentionPeriod. For example, RetentionPeriod of + `30d` will keep only the backups of last 30 days. Sample duration + format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h + - minutes: \t30m You can also combine the above durations. For example: + 30d12h30m" type: string required: + - backupMethod - backupPolicyName - - backupType type: object status: description: BackupStatus defines the observed state of Backup. properties: - availableReplicas: - description: availableReplicas available replicas for statefulSet - which created by backup. - format: int32 - type: integer - backupToolName: - description: backupToolName references the backup tool name. + actions: + description: actions records the actions information for this backup. 
+ items: + properties: + actionType: + description: actionType is the type of the action. + type: string + availableReplicas: + description: availableReplicas available replicas for statefulSet + action. + format: int32 + type: integer + completionTimestamp: + description: completionTimestamp records the time an action + was completed. + format: date-time + type: string + failureReason: + description: failureReason is an error that caused the backup + to fail. + type: string + name: + description: name is the name of the action. + type: string + objectRef: + description: objectRef is the object reference for the action. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + phase: + description: phase is the current state of the action. + type: string + startTimestamp: + description: startTimestamp records the time an action was started. + format: date-time + type: string + timeRange: + description: timeRange records the time range of backed up data, + for PITR, this is the time range of recoverable data. + properties: + end: + description: end records the end time of backup. + format: date-time + type: string + start: + description: start records the start time of backup. + format: date-time + type: string + type: object + totalSize: + description: totalSize is the total size of backed up data size. + A string with capacity units in the format of "1Gi", "1Mi", + "1Ki". + type: string + volumeSnapshots: + description: volumeSnapshots records the volume snapshot status + for the action. + items: + properties: + contentName: + description: contentName is the name of the volume snapshot + content. + type: string + name: + description: name is the name of the volume snapshot. + type: string + size: + description: size is the size of the volume snapshot. + type: string + volumeName: + description: volumeName is the name of the volume. + type: string + type: object + type: array + type: object + type: array + backupMethod: + description: backupMethod records the backup method information for + this backup. Refer to BackupMethod for more details. 
+ properties: + actionSetName: + description: actionSetName refers to the ActionSet object that + defines the backup actions. For volume snapshot backup, the + actionSet is not required, the controller will use the CSI volume + snapshotter to create the snapshot. + type: string + env: + description: env specifies the environment variables for the backup + workload. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + name: + description: the name of backup method. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + runtimeSettings: + description: runtimeSettings specifies runtime settings for the + backup workload container. + properties: + resources: + description: 'resources specifies the resource required by + container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. \n This field + is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + snapshotVolumes: + default: false + description: snapshotVolumes specifies whether to take snapshots + of persistent volumes. if true, the BackupScript is not required, + the controller will use the CSI volume snapshotter to create + the snapshot. + type: boolean + targetVolumes: + description: targetVolumes specifies which volumes from the target + should be mounted in the backup workload. + properties: + volumeMounts: + description: volumeMounts specifies the mount for the volumes + specified in `Volumes` section. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves + similarly to SubPath but environment variable references + $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes indicates the list of volumes of targeted + application that should be mounted on the backup job. + items: + type: string + type: array + type: object + required: + - name + type: object + backupRepoName: + description: backupRepoName is the name of the backup repository. type: string completionTimestamp: - description: Date/time when the backup finished being processed. + description: completionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. The server's + time is used for CompletionTimestamp. format: date-time type: string duration: description: The duration time of backup execution. When converted - to a string, the form is "1h2m0.5s". + to a string, the format is "1h2m0.5s". type: string expiration: - description: The date and time when the Backup is eligible for garbage - collection. 'null' means the Backup is NOT be cleaned except delete + description: expiration is when this backup is eligible for garbage + collection. 'null' means the Backup will NOT be cleaned except delete manual. format: date-time type: string failureReason: - description: The reason for a backup failure. + description: failureReason is an error that caused the backup to fail. type: string - logFilePersistentVolumeClaimName: - description: logFilePersistentVolumeClaimName saves the logfile backup - data. 
+ formatVersion: + description: formatVersion is the backup format version, including + major, minor and patch version. type: string - manifests: - description: manifests determines the backup metadata info. - properties: - backupLog: - description: backupLog records startTime and stopTime of data - logging. - properties: - startTime: - description: startTime records the start time of data logging. - format: date-time - type: string - stopTime: - description: stopTime records the stop time of data logging. - format: date-time - type: string - type: object - backupSnapshot: - description: snapshot records the volume snapshot metadata. - properties: - volumeSnapshotContentName: - description: volumeSnapshotContentName specifies the name - of a pre-existing VolumeSnapshotContent object representing - an existing volume snapshot. This field should be set if - the snapshot already exists and only needs a representation - in Kubernetes. This field is immutable. - type: string - volumeSnapshotName: - description: volumeSnapshotName records the volumeSnapshot - name. - type: string - type: object - backupTool: - description: backupTool records information about backup files - generated by the backup tool. - properties: - checkpoint: - description: backup checkpoint, for incremental backup. - type: string - checksum: - description: checksum of backup file, generated by md5 or - sha1 or sha256. - type: string - filePath: - description: filePath records the file path of backup. - type: string - logFilePath: - description: logFilePath records the log file path of backup. - type: string - uploadTotalSize: - description: Backup upload total size. A string with capacity - units in the form of "1Gi", "1Mi", "1Ki". - type: string - volumeName: - description: volumeName records volume name of backup data - pvc. - type: string - type: object - target: - description: target records the target cluster metadata string, - which is in JSON format. 
- type: string - userContext: - additionalProperties: - type: string - description: userContext stores some loosely structured and extensible - information. - type: object - type: object - parentBackupName: - description: Records parentBackupName if backupType is incremental. + path: + description: path is the directory inside the backup repository where + the backup data is stored. It is an absolute path in the backup + repository. type: string persistentVolumeClaimName: - description: remoteVolume saves the backup data. + description: persistentVolumeClaimName is the name of the persistent + volume claim that is used to store the backup data. type: string phase: - description: BackupPhase The current phase. Valid values are New, - InProgress, Completed, Failed. + description: phase is the current state of the Backup. enum: - New - InProgress @@ -196,18 +511,209 @@ spec: - Failed - Deleting type: string - sourceCluster: - description: sourceCluster records the source cluster information - for this backup. - type: string startTimestamp: - description: Date/time when the backup started being processed. + description: startTimestamp records the time a backup was started. + The server's time is used for StartTimestamp. format: date-time type: string + target: + description: target records the target information for this backup. + properties: + connectionCredential: + description: connectionCredential specifies the connection credential + to connect to the target database cluster. + properties: + hostKey: + description: hostKey specifies the map key of the host in + the connection credential secret. + type: string + passwordKey: + default: password + description: passwordKey specifies the map key of the password + in the connection credential secret. + type: string + portKey: + default: port + description: portKey specifies the map key of the port in + the connection credential secret. 
+ type: string + secretName: + description: secretName refers to the Secret object that contains + the connection credential. + type: string + usernameKey: + default: username + description: usernameKey specifies the map key of the user + in the connection credential secret. + type: string + type: object + podSelector: + description: podSelector is used to find the target pod. The volumes + of the target pod will be backed up. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + strategy: + default: Any + description: 'strategy specifies the strategy to select the + target pod when multiple pods are selected. Valid values + are: - All: select all pods that match the labelsSelector. + - Any: select any one pod that match the labelsSelector.' 
+ enum: + - All + - Any + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: resources specifies the kubernetes resources to back + up. + properties: + excluded: + description: excluded is a slice of namespaced-scoped resource + type names to exclude in the kubernetes resources. The default + value is empty. + items: + type: string + type: array + included: + default: + - '*' + description: included is a slice of namespaced-scoped resource + type names to include in the kubernetes resources. The default + value is "*", which means all resource types will be included. + items: + type: string + type: array + selector: + description: selector is a metav1.LabelSelector to filter + the target kubernetes resources that need to be backed up. + If not set, will do not back up any kubernetes resources. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + serviceAccountName: + description: serviceAccountName specifies the service account + to run the backup workload. + type: string + type: object + timeRange: + description: timeRange records the time range of backed up data, for + PITR, this is the time range of recoverable data. + properties: + end: + description: end records the end time of backup. + format: date-time + type: string + start: + description: start records the start time of backup. + format: date-time + type: string + type: object totalSize: - description: Backup total size. A string with capacity units in the - form of "1Gi", "1Mi", "1Ki". + description: totalSize is the total size of backed up data size. A + string with capacity units in the format of "1Gi", "1Mi", "1Ki". type: string + volumeSnapshots: + description: volumeSnapshots records the volume snapshot status for + the action. + items: + properties: + contentName: + description: contentName is the name of the volume snapshot + content. + type: string + name: + description: name is the name of the volume snapshot. + type: string + size: + description: size is the size of the volume snapshot. + type: string + volumeName: + description: volumeName is the name of the volume. 
+ type: string + type: object + type: array type: object type: object served: true diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backupschedules.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backupschedules.yaml new file mode 100644 index 00000000000..40d07aa5fc7 --- /dev/null +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backupschedules.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: backupschedules.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + kind: BackupSchedule + listKind: BackupScheduleList + plural: backupschedules + shortNames: + - bs + singular: backupschedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: BackupSchedule is the Schema for the backupschedules API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupScheduleSpec defines the desired state of BackupSchedule. 
+ properties: + backupPolicyName: + description: Which backupPolicy is applied to perform this backup. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + schedules: + description: schedules defines the list of backup schedules. + items: + properties: + backupMethod: + description: backupMethod specifies the backup method name that + is defined in backupPolicy. + type: string + cronExpression: + description: the cron expression for schedule, the timezone + is in UTC. see https://en.wikipedia.org/wiki/Cron. + type: string + enabled: + description: enabled specifies whether the backup schedule is + enabled or not. + type: boolean + retentionPeriod: + default: 7d + description: "retentionPeriod determines a duration up to which + the backup should be kept. controller will remove all backups + that are older than the RetentionPeriod. For example, RetentionPeriod + of `30d` will keep only the backups of last 30 days. Sample + duration format: - years: \t2y - months: \t6mo - days: \t\t30d + - hours: \t12h - minutes: \t30m You can also combine the above + durations. For example: 30d12h30m" + type: string + required: + - backupMethod + - cronExpression + type: object + minItems: 1 + type: array + startingDeadlineMinutes: + description: startingDeadlineMinutes defines the deadline in minutes + for starting the backup workload if it misses scheduled time for + any reason. + format: int64 + maximum: 1440 + minimum: 0 + type: integer + required: + - backupPolicyName + - schedules + type: object + status: + description: BackupScheduleStatus defines the observed state of BackupSchedule. + properties: + failureReason: + description: failureReason is an error that caused the backup to fail. + type: string + observedGeneration: + description: observedGeneration is the most recent generation observed + for this BackupSchedule. It refers to the BackupSchedule's generation, + which is updated on mutation by the API Server. 
+ format: int64 + type: integer + phase: + description: phase describes the phase of the BackupSchedule. + type: string + schedules: + additionalProperties: + description: ScheduleStatus defines the status of each schedule. + properties: + failureReason: + description: failureReason is an error that caused the backup + to fail. + type: string + lastScheduleTime: + description: lastScheduleTime records the last time the backup + was scheduled. + format: date-time + type: string + lastSuccessfulTime: + description: lastSuccessfulTime records the last time the backup + was successfully completed. + format: date-time + type: string + phase: + description: phase describes the phase of the schedule. + type: string + type: object + description: schedules describes the status of each schedule. + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml deleted file mode 100644 index 5244e2b2203..00000000000 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuptools.yaml +++ /dev/null @@ -1,330 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - labels: - app.kubernetes.io/name: kubeblocks - name: backuptools.dataprotection.kubeblocks.io -spec: - group: dataprotection.kubeblocks.io - names: - categories: - - kubeblocks - kind: BackupTool - listKind: BackupToolList - plural: backuptools - singular: backuptool - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupTool is the Schema for the backuptools API (defined by - provider) - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupToolSpec defines the desired state of BackupTool - properties: - backupCommands: - description: Array of command that apps can do database backup. from - invoke args the order of commands follows the order of array. - items: - type: string - type: array - deployKind: - default: job - description: 'which kind for run a backup tool, supported values: - job, statefulSet.' - enum: - - job - - statefulSet - type: string - env: - description: List of environment variables to set in the container. - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. 
- properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-preserve-unknown-fields: true - envFrom: - description: List of sources to populate environment variables in - the container. The keys defined within a source must be a C_IDENTIFIER. - All invalid keys will be reported as an event when the container - is starting. When a key exists in multiple sources, the value associated - with the last source will take precedence. Values defined by an - Env with a duplicate key will take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-preserve-unknown-fields: true - image: - description: Backup tool Container image name. - type: string - incrementalBackupCommands: - description: Array of command that apps can do database incremental - backup. like xtrabackup, that can performs an incremental backup - file. - items: - type: string - type: array - logical: - description: backup tool can support logical restore, in this case, - restore NOT RESTART database. - properties: - incrementalRestoreCommands: - description: Array of incremental restore commands. - items: - type: string - type: array - podScope: - default: All - description: 'podScope defines the pod scope for restore from - backup, supported values: - ''All'' will exec the restore command - on all pods. - ''ReadWrite'' will pick a ReadWrite pod to exec - the restore command.' - enum: - - All - - ReadWrite - type: string - restoreCommands: - description: Array of command that apps can perform database restore. - like xtrabackup, that can performs restore mysql from files. 
- items: - type: string - type: array - type: object - physical: - description: backup tool can support physical restore, in this case, - restore must be RESTART database. - properties: - incrementalRestoreCommands: - description: Array of incremental restore commands. - items: - type: string - type: array - relyOnLogfile: - description: relyOnLogfile defines whether the current recovery - relies on log files - type: boolean - restoreCommands: - description: Array of command that apps can perform database restore. - like xtrabackup, that can performs restore mysql from files. - items: - type: string - type: array - type: object - resources: - description: Compute Resources required by this container. Cannot - be updated. - properties: - claims: - description: "Claims lists the names of resources, defined in - spec.resourceClaims, that are used by this container. \n This - is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be set - for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims - of the Pod where this field is used. It makes that resource - available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - type: - default: file - description: the type of backup tool, file or pitr - enum: - - file - - pitr - type: string - required: - - backupCommands - - image - type: object - status: - description: BackupToolStatus defines the observed state of BackupTool - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml deleted file mode 100644 index 12cdf01405b..00000000000 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_restorejobs.yaml +++ /dev/null @@ -1,1795 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.12.1 - labels: - app.kubernetes.io/name: kubeblocks - name: restorejobs.dataprotection.kubeblocks.io -spec: - group: dataprotection.kubeblocks.io - names: - categories: - - kubeblocks - kind: RestoreJob - listKind: RestoreJobList - plural: restorejobs - singular: restorejob - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: STATUS - type: string - - jsonPath: .status.completionTimestamp 
- name: COMPLETION-TIME - type: date - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: RestoreJob is the Schema for the restorejobs API (defined by - User) - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: RestoreJobSpec defines the desired state of RestoreJob - properties: - backupJobName: - description: Specified one backupJob to restore. - type: string - onFailAttempted: - description: count of backup stop retries on fail. - format: int32 - type: integer - target: - description: the target database workload to restore - properties: - labelsSelector: - description: labelsSelector is used to find matching pods. Pods - that match this label selector are counted to determine the - number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. 
Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - secret: - description: secret is used to connect to the target database - cluster. If not set, secret will be inherited from backup policy - template. if still not set, the controller will check if any - system account for dataprotection has been created. - properties: - name: - description: the secret name - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: passwordKey the map key of the password in the - connection credential secret - type: string - usernameKey: - default: username - description: usernameKey the map key of the user in the connection - credential secret - type: string - required: - - name - type: object - required: - - labelsSelector - type: object - targetVolumeMounts: - description: array of restore volume mounts . - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. 
- type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When - not set, MountPropagationNone is used. This field is beta - in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the - container's volume should be mounted. Behaves similarly to - SubPath but environment variable references $(VAR_NAME) are - expanded using the container's environment. Defaults to "" - (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - minItems: 1 - type: array - x-kubernetes-preserve-unknown-fields: true - targetVolumes: - description: array of restore volumes . - items: - description: Volume represents a named volume in a pod that may - be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'awsElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'readOnly value true will force the readOnly - setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'volumeID is unique ID of the persistent disk - resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: None, - Read Only, Read Write.' - type: string - diskName: - description: diskName is the Name of the data disk in the - blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in the blob - storage - type: string - fsType: - description: fsType is Filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single blob - disk per storage account Managed: azure managed data - disk (only in managed availability set). defaults to shared' - type: string - readOnly: - description: readOnly Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: secretName is the name of secret that contains - Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'monitors is Required: Monitors is a collection - of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: - type: string - type: array - path: - description: 'path is Optional: Used as the mounted root, - rather than the full Ceph tree, default is /' - type: string - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'secretFile is Optional: SecretFile is the - path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'secretRef is Optional: SecretRef is reference - to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: 'user is optional: User is the rados user name, - default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'cinder represents a cinder volume attached and - mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to - be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'secretRef is optional: points to a secret - object containing parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: 'volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'defaultMode is optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. 
YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: items if unspecified, each key-value pair in - the Data field of the referenced ConfigMap will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the ConfigMap, the volume setup will error unless it is - marked optional. Paths must be relative and may not contain - the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: optional specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). - properties: - driver: - description: driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated - CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: nodePublishSecretRef is a reference to the - secret object containing sensitive information to pass - to the CSI driver to complete the CSI NodePublishVolume - and NodeUnpublishVolume calls. This field is optional, - and may be empty if no secret is required. If the secret - object contains more than one secret, all secret references - are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: readOnly specifies a read-only configuration - for the volume. Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: volumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. 
- type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a Optional: mode bits used to set - permissions on created files by default. Must be an octal - value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are - supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: 'Optional: mode bits used to set permissions - on this file, must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' 
- format: int32 - type: integer - path: - description: 'Required: Path is the relative path - name of the file to be created. Must not be absolute - or contain the ''..'' path. Must be utf-8 encoded. - The first item of the relative path must not start - with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - description: 'emptyDir represents a temporary directory that - shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'medium represents what type of storage medium - should back this directory. The default is "" which means - to use the node''s default medium. Must be an empty string - (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: 'sizeLimit is the total amount of local storage - required for this EmptyDir volume. The size limit is also - applicable for memory medium. The maximum usage on memory - medium EmptyDir would be the minimum value between the - SizeLimit specified here and the sum of memory limits - of all containers in a pod. 
The default is nil which means - that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: "ephemeral represents a volume that is handled - by a cluster storage driver. The volume's lifecycle is tied - to the pod that defines it - it will be created before the - pod starts, and deleted when the pod is removed. \n Use this - if: a) the volume is only needed while the pod runs, b) features - of normal volumes like restoring from snapshot or capacity - tracking are needed, c) the storage driver is specified through - a storage class, and d) the storage driver supports dynamic - volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource - for more information on the connection between this volume - type and PersistentVolumeClaim). \n Use PersistentVolumeClaim - or one of the vendor-specific APIs for volumes that persist - for longer than the lifecycle of an individual pod. \n Use - CSI for light-weight local ephemeral volumes if the CSI driver - is meant to be used that way - see the documentation of the - driver for more information. \n A pod can use both types of - ephemeral volumes and persistent volumes at the same time." - properties: - volumeClaimTemplate: - description: "Will be used to create a stand-alone PVC to - provision the volume. The pod in which this EphemeralVolumeSource - is embedded will be the owner of the PVC, i.e. the PVC - will be deleted together with the pod. The name of the - PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. - Pod validation will reject the pod if the concatenated - name is not valid for a PVC (for example, too long). 
\n - An existing PVC with that name that is not owned by the - pod will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC - is meant to be used by the pod, the PVC has to updated - with an owner reference to the pod once the pod exists. - Normally this should not be necessary, but it may be useful - when manually reconstructing a broken cluster. \n This - field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. \n Required, must - not be nil." - properties: - metadata: - description: May contain labels and annotations that - will be copied into the PVC when creating it. No other - fields are allowed and will be rejected during validation. - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: The specification for the PersistentVolumeClaim. - The entire content is copied unchanged into the PVC - that gets created from this template. The same fields - as in a PersistentVolumeClaim are also valid here. - properties: - accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified - data source. 
When the AnyVolumeDataSource feature - gate is enabled, dataSource contents will be copied - to dataSourceRef, and dataSourceRef contents will - be copied to dataSource when dataSourceRef.namespace - is not specified. If the namespace is specified, - then dataSourceRef will not be copied to dataSource.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, if - a non-empty volume is desired. This may be any - object from a non-empty API group (non core object) - or a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. - This field will replace the functionality of the - dataSource field and as such if both fields are - non-empty, they must have the same value. For - backwards compatibility, when namespace isn''t - specified in dataSourceRef, both fields (dataSource - and dataSourceRef) will be set to the same value - automatically if one of them is empty and the - other is non-empty. When namespace is specified - in dataSourceRef, dataSource isn''t set to the - same value and must be empty. There are three - important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types - of objects, dataSourceRef allows any non-core - object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping - them), dataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - * While dataSource only allows local objects, - dataSourceRef allows objects in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled. (Alpha) Using the - namespace field of dataSourceRef requires the - CrossNamespaceVolumeDataSource feature gate to - be enabled.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - namespace: - description: Namespace is the namespace of resource - being referenced Note that when a namespace - is specified, a gateway.networking.k8s.io/ReferenceGrant - object is required in the referent namespace - to allow that namespace's owner to accept - the reference. See the ReferenceGrant documentation - for details. (Alpha) This field requires the - CrossNamespaceVolumeDataSource feature gate - to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity recorded - in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. 
\n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." - items: - description: ResourceClaim references one - entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: selector is a label query over volumes - to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. 
- items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource that is - attached to a kubelet's host machine and then exposed to the - pod. - properties: - fsType: - description: 'fsType is the filesystem type to mount. 
Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. TODO: how do we prevent errors in the - filesystem from compromising the machine' - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide - names (WWNs)' - items: - type: string - type: array - wwids: - description: 'wwids Optional: FC volume world wide identifiers - (wwids) Either wwids or combination of targetWWNs and - lun must be set, but not both simultaneously.' - items: - type: string - type: array - type: object - flexVolume: - description: flexVolume represents a generic volume resource - that is provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to use for - this volume. - type: string - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends - on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds extra - command options if any.' - type: object - readOnly: - description: 'readOnly is Optional: defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'secretRef is Optional: secretRef is reference - to the secret object containing sensitive information - to pass to the plugin scripts. This may be empty if no - secret object is specified. If the secret object contains - more than one secret, all secrets are passed to the plugin - scripts.' 
- properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: datasetName is Name of the dataset stored as - metadata -> name on the dataset for Flocker should be - considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. This - is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'fsType is filesystem type of the volume that - you want to mount. Tip: Ensure that the filesystem type - is supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'pdName is unique name of the PD resource in - GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'gitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision - a container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: directory is the target directory name. Must - not contain or start with '..'. If '.' is supplied, the - volume directory will be the git repository. Otherwise, - if specified, the volume will contain the git repository - in the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the specified - revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'glusterfs represents a Glusterfs mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'endpoints is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'path is the Glusterfs volume path. 
More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'readOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to - false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'hostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'path of the directory on the host. If the - path is a symlink, it will follow the link to the real - path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'iscsi represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to - the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI - Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support iSCSI - Session CHAP authentication - type: boolean - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - initiatorName: - description: initiatorName is the custom iSCSI Initiator - Name. If initiatorName is specified with iscsiInterface - simultaneously, new iSCSI interface : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iscsiInterface is the interface Name that uses - an iSCSI transport. Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: portals is the iSCSI Target Portal List. The - portal is either an IP or ip_addr:port if the port is - other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI target - and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: targetPortal is iSCSI Target Portal. The Portal - is either an IP or ip_addr:port if the port is other than - default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'name of the volume. Must be a DNS_LABEL and unique - within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'nfs represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'readOnly here will force the NFS export to - be mounted with read-only permissions. Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource represents a - reference to a PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'claimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: readOnly Will force the ReadOnly setting in - VolumeMounts. Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. 
- type: string - pdID: - description: pdID is the ID that identifies Photon Controller - persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources secrets, - configmaps, and downward API - properties: - defaultMode: - description: defaultMode are the mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Directories within the path are - not affected by this setting. This might be in conflict - with other options that affect the file mode, like fsGroup, - and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume projections - items: - description: Projection that may be projected along with - other supported volume types - properties: - configMap: - description: configMap information about the configMap - data to project - properties: - items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. 
- If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the ConfigMap, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the downwardAPI - data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: 'Optional: mode bits used to - set permissions on this file, must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' - path. Must be utf-8 encoded. The first - item of the relative path must not start - with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu - and requests.memory) are currently supported.' 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to - select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - description: secret information about the secret data - to project - properties: - items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the Secret, the volume setup - will error unless it is marked optional. Paths - must be relative and may not contain the '..' - path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of - the file to map the key to. 
May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: optional field specify whether the - Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information about - the serviceAccountToken data to project - properties: - audience: - description: audience is the intended audience - of the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, the - kubelet volume plugin will proactively rotate - the service account token. The kubelet will - start trying to rotate the token if the token - is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. - format: int64 - type: integer - path: - description: path is the path relative to the - mount point of the file to project the token - into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime - properties: - group: - description: group to map volume access to Default is no - group - type: string - readOnly: - description: readOnly here will force the Quobyte volume - to be mounted with read-only permissions. Defaults to - false. - type: boolean - registry: - description: registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: user to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'rbd represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from - compromising the machine' - type: string - image: - description: 'image is the rados image name. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'keyring is the path to key ring for RBDUser. 
- Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - items: - type: string - type: array - pool: - description: 'pool is the rados pool name. Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'secretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is - nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: 'user is the rados user name. Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: gateway is the host address of the ScaleIO - API Gateway. - type: string - protectionDomain: - description: protectionDomain is the name of the ScaleIO - Protection Domain for the configured storage. - type: string - readOnly: - description: readOnly Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: secretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL communication - with Gateway, default false - type: boolean - storageMode: - description: storageMode indicates whether the storage for - a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage Pool associated - with the protection domain. - type: string - system: - description: system is the name of the storage system as - configured in ScaleIO. - type: string - volumeName: - description: volumeName is the name of a volume already - created in the ScaleIO system that is associated with - this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'defaultMode is Optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: items If unspecified, each key-value pair in - the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the Secret, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether the Secret or - its keys must be defined - type: boolean - secretName: - description: 'secretName is the name of the secret in the - pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. 
Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: secretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: volumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within - a namespace. - type: string - volumeNamespace: - description: volumeNamespace specifies the scope of the - volume within StorageOS. If no namespace is specified - then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS - for tighter integration. Set VolumeName to any name to - override the default behaviour. Set to "default" if you - are not using namespaces within StorageOS. Namespaces - that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: fsType is filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy Based - Management (SPBM) profile ID associated with the StoragePolicyName. 
- type: string - storagePolicyName: - description: storagePolicyName is the storage Policy Based - Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies vSphere - volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - minItems: 1 - type: array - x-kubernetes-preserve-unknown-fields: true - required: - - backupJobName - - target - - targetVolumeMounts - - targetVolumes - type: object - status: - description: RestoreJobStatus defines the observed state of RestoreJob - properties: - completionTimestamp: - description: Date/time when the backup finished being processed. - format: date-time - type: string - expiration: - description: The date and time when the Backup is eligible for garbage - collection. 'null' means the Backup is NOT be cleaned except delete - manual. - format: date-time - type: string - failureReason: - description: Job failed reason. - type: string - phase: - description: RestoreJobPhase The current phase. Valid values are New, - InProgressPhy, InProgressLogic, Completed, Failed. - enum: - - New - - InProgressPhy - - InProgressLogic - - Completed - - Failed - type: string - startTimestamp: - description: Date/time when the backup started being processed. 
- format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_restores.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_restores.yaml new file mode 100644 index 00000000000..a36a9f042d4 --- /dev/null +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_restores.yaml @@ -0,0 +1,2522 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: restores.dataprotection.kubeblocks.io +spec: + group: dataprotection.kubeblocks.io + names: + categories: + - kubeblocks + - all + kind: Restore + listKind: RestoreList + plural: restores + singular: restore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backup.name + name: BACKUP + type: string + - description: Point in time for restoring + jsonPath: .spec.restoreTime + name: RESTORE-TIME + type: string + - description: Restore Status. + jsonPath: .status.phase + name: STATUS + type: string + - jsonPath: .status.duration + name: DURATION + type: string + - jsonPath: .metadata.creationTimestamp + name: CREATE-TIME + type: string + - jsonPath: .status.completionTimestamp + name: COMPLETION-TIME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Restore is the Schema for the restores API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSpec defines the desired state of Restore + properties: + backup: + description: 'backup name, the following behavior based on the backup + type: 1. Full: will be restored the full backup directly. 2. Incremental: + will be restored sequentially from the most recent full backup of + this incremental backup. 3. Differential: will be restored sequentially + from the parent backup of the differential backup. 4. Continuous: + will find the most recent full backup at this time point and the + input continuous backup to restore.' + properties: + name: + description: backup name + type: string + namespace: + description: backup namespace + type: string + required: + - name + - namespace + type: object + x-kubernetes-validations: + - message: forbidden to update spec.backupName + rule: self == oldSelf + containerResources: + description: specified the required resources of restore job's container. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + env: + description: 'list of environment variables to set in the container + for restore and will be merged with the env of Backup and ActionSet. + the priority of merging is as follows: Restore env > Backup env + > ActionSet env.' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + prepareDataConfig: + description: configuration for the action of "prepareData" phase, + including the persistent volume claims that need to be restored + and scheduling strategy of temporary recovery pod. + properties: + dataSourceRef: + description: dataSourceRef describes the configuration when using + `persistentVolumeClaim.spec.dataSourceRef` method for restoring. + it describes the source volume of the backup targetVolumes and + how to mount path in the restoring container. + properties: + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. 
+ type: string + volumeSource: + description: volumeSource describes the volume will be restored + from the specified volume of the backup targetVolumes. required + if the backup uses volume snapshot. + type: string + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + - message: forbidden to update spec.prepareDataConfig.dataSourceRef + rule: self == oldSelf + schedulingSpec: + description: scheduling spec for restoring pod. + properties: + affinity: + description: affinity is a group of affinity scheduling rules. + refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to an update), the system may or may not try + to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. 
for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". 
The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. 
The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to a pod label update), the system may or may + not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. 
avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to + the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. 
+ This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. 
A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + anti-affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. 
The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: nodeName is a request to schedule this pod onto + a specific node. If it is non-empty, the scheduler simply + schedules this pod onto that node, assuming that it fits + resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: If specified, the pod will be dispatched by specified + scheduler. If not specified, the pod will be dispatched + by default scheduler. + type: string + tolerations: + description: the restoring pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. 
+ type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: topologySpreadConstraints describes how a group + of pods ought to spread across topology domains. Scheduler + will schedule pods in a way which abides by the constraints. + refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching + pods. Pods that match this label selector are counted + to determine the number of pods in their corresponding + topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are ANDed with + labelSelector to select the group of existing pods + over which spreading will be calculated for the incoming + pod. The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be set when + LabelSelector isn't set. Keys that don't exist in + the incoming pod labels will be ignored. A null or + empty list means only match against labelSelector. + \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or zero + if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to + 1, and pods with the same labelSelector spread as + 2/2/1: In this case, the global minimum is 1. | zone1 + | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 + to become 2/2/2; scheduling it onto zone1(zone2) would + make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto + any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default value + is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number + of eligible domains. When the number of eligible domains + with matching topology keys is less than minDomains, + Pod Topology Spread treats \"global minimum\" as 0, + and then the calculation of Skew is performed. And + when the number of eligible domains with matching + topology keys equals or greater than minDomains, this + value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to + those domains. If value is nil, the constraint behaves + as if MinDomains is equal to 1. Valid values are integers + greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. 
\n For example, in a 3-zone + cluster, MaxSkew is set to 2, MinDomains is set to + 5 and pods with the same labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so \"global minimum\" is treated as 0. In this situation, + new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod + is scheduled to any of the three zones, it will violate + MaxSkew. \n This is a beta field and requires the + MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will + treat Pod's nodeAffinity/nodeSelector when calculating + pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included + in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + \n If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will + treat node taints when calculating pod topology spread + skew. Options are: - Honor: nodes without taints, + along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints + are ignored. All nodes are included. \n If this value + is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. 
+ We consider each as a "bucket", and try + to put balanced number of pods into each bucket. We + define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose + nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal + with a pod if it doesn''t satisfy the spread constraint. + - DoNotSchedule (default) tells the scheduler not + to schedule it. - ScheduleAnyway tells the scheduler + to schedule the pod in any location, but giving higher + precedence to topologies that would help reduce the + skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node + assignment for that pod would violate "MaxSkew" on + some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P + | P | P | If WhenUnsatisfiable is set to DoNotSchedule, + incoming pod can only be scheduled to zone2(zone3) + to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) + satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make + it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.schedulingSpec + rule: self == oldSelf + volumeClaimManagementPolicy: + default: Parallel + description: 'VolumeClaimManagementPolicy defines recovery strategy + for persistent volume claim. supported policies are as follows: + 1. 
Parallel: parallel recovery of persistent volume claim. 2. + Serial: restore the persistent volume claim in sequence, and + wait until the previous persistent volume claim is restored + before restoring a new one.' + enum: + - Parallel + - Serial + type: string + volumeClaims: + description: volumeClaims defines the persistent Volume claims + that need to be restored and mount them together into the restore + job. these persistent Volume claims will be created if not exist. + items: + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. + type: string + volumeClaimSpec: + description: volumeClaimSpec defines the desired characteristics + of a persistent volume claim. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on + the contents of the specified data source. 
When the + AnyVolumeDataSource feature gate is enabled, dataSource + contents will be copied to dataSourceRef, and dataSourceRef + contents will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from + which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. There + are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. 
* While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is + specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace to + allow that namespace's owner to accept the reference. + See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the + status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. 
\n This is an alpha field and + requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can + only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the + Pod where this field is used. It makes that + resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + volumeSource: + description: volumeSource describes the volume will be restored + from the specified volume of the backup targetVolumes. + required if the backup uses volume snapshot. 
+ type: string + required: + - metadata + - volumeClaimSpec + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + type: array + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.volumeClaims + rule: self == oldSelf + volumeClaimsTemplate: + description: volumeClaimsTemplate defines a template to build + persistent Volume claims that need to be restored. these claims + will be created in an orderly manner based on the number of + replicas or reused if already exist. + properties: + replicas: + description: the replicas of persistent volume claim which + need to be created and restored. the format of created claim + name is "-". + format: int32 + minimum: 1 + type: integer + startingIndex: + description: the starting index for the created persistent + volume claim by according to template. minimum is 0. + format: int32 + minimum: 0 + type: integer + templates: + description: templates is a list of volume claims. + items: + properties: + metadata: + description: 'Standard object''s metadata. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + mountPath: + description: mountPath path within the restoring container + at which the volume should be mounted. + type: string + volumeClaimSpec: + description: volumeClaimSpec defines the desired characteristics + of a persistent volume claim. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + volumeSource: + description: volumeSource describes the volume will + be restored from the specified volume of the backup + targetVolumes. required if the backup uses volume + snapshot. + type: string + required: + - metadata + - volumeClaimSpec + type: object + x-kubernetes-validations: + - message: at least one exists for volumeSource and mountPath. + rule: self.volumeSource != '' || self.mountPath !='' + type: array + required: + - replicas + - templates + type: object + x-kubernetes-validations: + - message: forbidden to update spec.prepareDataConfig.volumeClaimsTemplate + rule: self == oldSelf + required: + - volumeClaimManagementPolicy + type: object + readyConfig: + description: configuration for the action of "postReady" phase. 
+ properties: + connectCredential: + description: credential template used for creating a connection + credential + properties: + hostKey: + default: host + description: hostKey the map key of the host in the connection + credential secret + type: string + passwordKey: + default: password + description: passwordKey the map key of the password in the + connection credential secret + type: string + portKey: + default: port + description: portKey the map key of the port in the connection + credential secret + type: string + secretName: + description: the secret name + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + usernameKey: + default: username + description: usernameKey the map key of the user in the connection + credential secret + type: string + required: + - passwordKey + - secretName + - usernameKey + type: object + execAction: + description: configuration for exec action. + properties: + target: + description: execActionTarget defines the pods that need to + be executed for the exec action. will execute on all pods + that meet the conditions. + properties: + podSelector: + description: kubectl exec in all selected pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - podSelector + type: object + type: object + jobAction: + description: configuration for job action. + properties: + target: + description: jobActionTarget defines the pod that need to + be executed for the job action. will select a pod that meets + the conditions to execute. + properties: + podSelector: + description: select one of the pods which selected by + labels to build the job spec, such as mount required + volumes and inject built-in env of the selected pod. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + volumeMounts: + description: volumeMounts defines which volumes of the + selected pod need to be mounted on the restoring pod. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + required: + - podSelector + type: object + required: + - target + type: object + readinessProbe: + description: periodic probe of the service readiness. controller + will perform postReadyHooks of BackupScript.spec.restore after + the service readiness when readinessProbe is configured. + properties: + exec: + description: exec specifies the action to take. + properties: + command: + description: refer to container command. + items: + type: string + type: array + image: + description: refer to container image. + type: string + required: + - command + - image + type: object + initialDelaySeconds: + description: number of seconds after the container has started + before probe is initiated. + minimum: 0 + type: integer + periodSeconds: + default: 5 + description: how often (in seconds) to perform the probe. + defaults to 5 second, minimum value is 1. + minimum: 1 + type: integer + timeoutSeconds: + default: 30 + description: number of seconds after which the probe times + out. defaults to 30 second, minimum value is 1. + minimum: 1 + type: integer + required: + - exec + type: object + type: object + x-kubernetes-validations: + - message: at least one exists for jobAction and execAction. + rule: has(self.jobAction) || has(self.execAction) + resources: + description: restore the specified resources of kubernetes. + properties: + included: + description: will restore the specified resources + items: + properties: + groupResource: + type: string + labelSelector: + description: select the specified resource for recovery + by label. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - groupResource + type: object + type: array + type: object + x-kubernetes-validations: + - message: forbidden to update spec.resources + rule: self == oldSelf + restoreTime: + description: restore according to a specified point in time. + pattern: ^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$ + type: string + x-kubernetes-validations: + - message: forbidden to update spec.restoreTime + rule: self == oldSelf + serviceAccountName: + description: service account name which needs for recovery pod. + type: string + required: + - backup + type: object + status: + description: RestoreStatus defines the observed state of Restore + properties: + actions: + description: recorded all restore actions performed. + properties: + postReady: + description: record the actions for postReady phase. + items: + properties: + backupName: + description: which backup's restore action belongs to. 
+ type: string + endTime: + description: endTime is the completion time for the restore + job. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the object condition. + type: string + name: + description: name describes the name of the recovery action + based on the current backup. + type: string + objectKey: + description: the execution object of the restore action. + type: string + startTime: + description: startTime is the start time for the restore + job. + format: date-time + type: string + status: + description: the status of this action. + enum: + - Processing + - Completed + - Failed + type: string + required: + - backupName + - name + - objectKey + type: object + type: array + prepareData: + description: record the actions for prepareData phase. + items: + properties: + backupName: + description: which backup's restore action belongs to. + type: string + endTime: + description: endTime is the completion time for the restore + job. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the object condition. + type: string + name: + description: name describes the name of the recovery action + based on the current backup. + type: string + objectKey: + description: the execution object of the restore action. + type: string + startTime: + description: startTime is the start time for the restore + job. + format: date-time + type: string + status: + description: the status of this action. + enum: + - Processing + - Completed + - Failed + type: string + required: + - backupName + - name + - objectKey + type: object + type: array + type: object + completionTimestamp: + description: Date/time when the restore finished being processed. + format: date-time + type: string + conditions: + description: describe current state of restore API Resource, like + warning. 
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + duration: + description: The duration time of restore execution. When converted + to a string, the form is "1h2m0.5s". + type: string + phase: + description: RestorePhase The current phase. Valid values are Running, + Completed, Failed, Deleting. + enum: + - Running + - Completed + - Failed + - Deleting + type: string + startTimestamp: + description: Date/time when the restore started being processed. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/helm/templates/rbac/dataprotection_restorejob_editor_role.yaml b/deploy/helm/templates/rbac/dataprotection_restore_editor_role.yaml similarity index 69% rename from deploy/helm/templates/rbac/dataprotection_restorejob_editor_role.yaml rename to deploy/helm/templates/rbac/dataprotection_restore_editor_role.yaml index e9464749c2d..1711d4f4f70 100644 --- a/deploy/helm/templates/rbac/dataprotection_restorejob_editor_role.yaml +++ b/deploy/helm/templates/rbac/dataprotection_restore_editor_role.yaml @@ -1,15 +1,15 @@ -# permissions for end users to edit restorejobs. 
+# permissions for end users to edit restores. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "kubeblocks.fullname" . }}-restorejob-editor-role + name: {{ include "kubeblocks.fullname" . }}-restore-editor-role labels: {{- include "kubeblocks.labels" . | nindent 4 }} rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - create - delete @@ -21,6 +21,6 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get diff --git a/deploy/helm/templates/rbac/dataprotection_restorejob_viewer_role.yaml b/deploy/helm/templates/rbac/dataprotection_restore_viewer_role.yaml similarity index 66% rename from deploy/helm/templates/rbac/dataprotection_restorejob_viewer_role.yaml rename to deploy/helm/templates/rbac/dataprotection_restore_viewer_role.yaml index ca90687a7b7..3230f6daec8 100644 --- a/deploy/helm/templates/rbac/dataprotection_restorejob_viewer_role.yaml +++ b/deploy/helm/templates/rbac/dataprotection_restore_viewer_role.yaml @@ -1,15 +1,15 @@ -# permissions for end users to view restorejobs. +# permissions for end users to view restores. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "kubeblocks.fullname" . }}-restorejob-viewer-role + name: {{ include "kubeblocks.fullname" . }}-restore-viewer-role labels: {{- include "kubeblocks.labels" . 
| nindent 4 }} rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs + - restores verbs: - get - list @@ -17,6 +17,6 @@ rules: - apiGroups: - dataprotection.kubeblocks.io resources: - - restorejobs/status + - restores/status verbs: - get diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index 5aa7c353aae..2faf409e0ed 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -217,7 +217,7 @@ resources: {} # memory: 128Mi # requests: # cpu: 10m - # memory: 64Mi +# memory: 64Mi ## @param priorityClassName ## @@ -246,10 +246,10 @@ nodeSelector: {} ## @param tolerations ## tolerations: - - key: kb-controller - operator: Equal - value: "true" - effect: NoSchedule +- key: kb-controller + operator: Equal + value: "true" + effect: NoSchedule ## @param affinity @@ -257,33 +257,33 @@ tolerations: affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" ## @param data plane settings ## dataPlane: tolerations: - - key: kb-data - operator: Equal - value: "true" - effect: NoSchedule + - key: kb-data + operator: Equal + value: "true" + effect: NoSchedule affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-data - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-data + operator: In + values: + - "true" ## AdmissionWebhooks settings ## @@ -321,7 +321,7 @@ dataProtection: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
tag: "" - imagePullSecrets: [ ] + imagePullSecrets: [] ## BackupRepo settings ## @@ -346,10 +346,10 @@ backupRepo: accessKeyId: "" secretAccessKey: "" -## Addon controller settings, this will require cluster-admin clusterrole. -## -## @param addonController.enabled -## @param addonController.jobTTL - is addon job time-to-live period, this value is time.Duration-parseable string. + ## Addon controller settings, this will require cluster-admin clusterrole. + ## + ## @param addonController.enabled + ## @param addonController.jobTTL - is addon job time-to-live period, this value is time.Duration-parseable string. ## default value is "5m" if not provided. ## @param addonController.jobImagePullPolicy - addon install job image pull policy. addonController: @@ -377,10 +377,10 @@ addonChartsImage: ## @param addonHelmInstallOptions - addon helm install options. addonHelmInstallOptions: - - "--atomic" - - "--cleanup-on-fail" - - "--wait" - - "--insecure-skip-tls-verify" +- "--atomic" +- "--cleanup-on-fail" +- "--wait" +- "--insecure-skip-tls-verify" ## Prometheus Addon ## @@ -410,21 +410,21 @@ prometheus: ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: - - key: kb-controller - operator: Equal - value: "true" - effect: NoSchedule + - key: kb-controller + operator: Equal + value: "true" + effect: NoSchedule affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" persistentVolume: ## If true, alertmanager will create/use a Persistent Volume Claim @@ -471,7 +471,7 @@ prometheus: # memory: 32Mi # requests: # cpu: 10m - # memory: 32Mi + # memory: 32Mi ## Security context to be added to alertmanager pods ## @@ -607,8 +607,8 @@ prometheus: ## Additional Prometheus server container flags ## extraFlags: - - 
web.enable-lifecycle - - web.enable-remote-write-receiver + - web.enable-lifecycle + - web.enable-remote-write-receiver ## Additional Prometheus server container arguments ## @@ -631,21 +631,21 @@ prometheus: ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: - - key: kb-controller - operator: Equal - value: "true" - effect: NoSchedule + - key: kb-controller + operator: Equal + value: "true" + effect: NoSchedule affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" persistentVolume: ## If true, Prometheus server will create/use a Persistent Volume Claim @@ -685,7 +685,7 @@ prometheus: # memory: 512Mi # requests: # cpu: 500m - # memory: 512Mi + # memory: 512Mi ## Prometheus' data retention period (default if not specified is 15 days) ## @@ -790,10 +790,10 @@ prometheus: ## alertmanagerFiles: alertmanager.yml: - global: { } + global: {} receivers: - - name: default-receiver + - name: default-receiver route: receiver: default-receiver @@ -1306,117 +1306,117 @@ prometheus: serverFiles: prometheus.yml: rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - - /etc/config/kubelet_alert_rules.yml - - /etc/config/mysql_alert_rules.yml - - /etc/config/postgresql_alert_rules.yml - - /etc/config/redis_alert_rules.yml - - /etc/config/kafka_alert_rules.yml - - /etc/config/mongodb_alert_rules.yml + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + - /etc/config/kubelet_alert_rules.yml + - /etc/config/mysql_alert_rules.yml + - /etc/config/postgresql_alert_rules.yml + - /etc/config/redis_alert_rules.yml + - /etc/config/kafka_alert_rules.yml + - /etc/config/mongodb_alert_rules.yml scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - 
localhost:9090 - - # Scrape config for kubeblocks managed service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `monitor.kubeblocks.io/scrape`: Only scrape services that have a value of - # `true`. - # * `monitor.kubeblocks.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `monitor.kubeblocks.io/path`: If the metrics path is not `/metrics` override this. - # * `monitor.kubeblocks.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - # * `monitor.kubeblocks.io/param_`: If the metrics endpoint uses parameters - # then you can set any parameter - - job_name: 'kubeblocks-service' - honor_labels: true - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [ __meta_kubernetes_service_label_app_kubernetes_io_managed_by ] - action: keep - regex: kubeblocks - - source_labels: [ __meta_kubernetes_service_label_monitor_kubeblocks_io_managed_by ] - action: drop - regex: agamotto - - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_scrape ] - action: keep - regex: true - - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_scheme ] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_path ] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [ __address__, __meta_kubernetes_service_annotation_monitor_kubeblocks_io_port ] - action: replace - target_label: __address__ - regex: (.+?)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_monitor_kubeblocks_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [ __meta_kubernetes_namespace ] - action: replace - target_label: namespace - - source_labels: [ __meta_kubernetes_service_name ] - action: replace - target_label: service - - source_labels: [ __meta_kubernetes_pod_node_name ] - action: replace - target_label: node - - source_labels: [ __meta_kubernetes_pod_name ] - action: replace - target_label: pod - - source_labels: [ __meta_kubernetes_pod_phase ] - regex: Pending|Succeeded|Failed|Completed - action: drop - - - job_name: 'kubeblocks-agamotto' - honor_labels: true - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [ __meta_kubernetes_service_label_monitor_kubeblocks_io_managed_by ] - action: keep - regex: agamotto - - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_scrape ] - action: keep - regex: true - - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_scheme ] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [ __meta_kubernetes_service_annotation_monitor_kubeblocks_io_path ] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [ __address__, __meta_kubernetes_service_annotation_monitor_kubeblocks_io_port ] - action: replace - target_label: __address__ - regex: (.+?)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_monitor_kubeblocks_io_param_(.+) - replacement: __param_$1 - - source_labels: [ __meta_kubernetes_pod_phase ] - regex: Pending|Succeeded|Failed|Completed - action: drop + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # Scrape config for kubeblocks managed service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `monitor.kubeblocks.io/scrape`: Only scrape services that have a value of + # `true`. + # * `monitor.kubeblocks.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `monitor.kubeblocks.io/path`: If the metrics path is not `/metrics` override this. + # * `monitor.kubeblocks.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. 
+ # * `monitor.kubeblocks.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubeblocks-service' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_managed_by] + action: keep + regex: kubeblocks + - source_labels: [__meta_kubernetes_service_label_monitor_kubeblocks_io_managed_by] + action: drop + regex: agamotto + - source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_monitor_kubeblocks_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_monitor_kubeblocks_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + + - job_name: 'kubeblocks-agamotto' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_label_monitor_kubeblocks_io_managed_by] + action: keep + regex: agamotto + - 
source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_monitor_kubeblocks_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_monitor_kubeblocks_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_monitor_kubeblocks_io_param_(.+) + replacement: __param_$1 + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop pushgateway: ## If false, pushgateway will not be installed @@ -1489,27 +1489,27 @@ grafana: # memory: 128Mi # requests: # cpu: 100m - # memory: 128Mi + # memory: 128Mi ## Node tolerations for grafana scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: - - key: kb-controller - operator: Equal - value: "true" - effect: NoSchedule + - key: kb-controller + operator: Equal + value: "true" + effect: NoSchedule affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" ## Timezone for the default dashboards ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg @@ -1573,7 +1573,7 @@ grafana: pathType: Prefix hosts: - - chart-example.local + - chart-example.local ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
extraPaths: [] # - path: /* @@ -1631,26 +1631,26 @@ snapshot-controller: tag: v6.2.1 tolerations: - - key: kb-controller - operator: Equal - value: "true" - effect: NoSchedule + - key: kb-controller + operator: Equal + value: "true" + effect: NoSchedule volumeSnapshotClasses: - - name: default-vsc - driver: hostpath.csi.k8s.io - deletionPolicy: Delete + - name: default-vsc + driver: hostpath.csi.k8s.io + deletionPolicy: Delete affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" kubeblocks-csi-driver: enabled: false @@ -1690,13 +1690,13 @@ alertmanager-webhook-adaptor: affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - preference: - matchExpressions: - - key: kb-controller - operator: In - values: - - "true" + - weight: 100 + preference: + matchExpressions: + - key: kb-controller + operator: In + values: + - "true" ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.configMapOverrideName}} ## @@ -1704,7 +1704,7 @@ alertmanager-webhook-adaptor: ## Webhook-Adaptor ConfigMap Entries configFiles: - config.yaml: { } + config.yaml: {} csi-hostpath-driver: ## @param csi-hostpath-driver.enabled -- Enable csi-hostpath-driver chart. 
@@ -1774,8 +1774,8 @@ storageClass: ## create: true mountOptions: - - noatime - - nobarrier + - noatime + - nobarrier provider: aws: volumeType: gp3 diff --git a/deploy/milvus/templates/backuppolicytemplate.yaml b/deploy/milvus/templates/backuppolicytemplate.yaml index c1be0359c64..3f91dcd599d 100644 --- a/deploy/milvus/templates/backuppolicytemplate.yaml +++ b/deploy/milvus/templates/backuppolicytemplate.yaml @@ -9,14 +9,14 @@ spec: clusterDefinitionRef: milvus backupPolicies: - componentDefRef: milvus - retention: - ttl: 7d - schedule: - snapshot: - enable: false - cronExpression: "0 18 * * 0" - snapshot: - target: - connectionCredentialKey: - passwordKey: password - usernameKey: username \ No newline at end of file + retentionPeriod: 7d + backupMethods: + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * 0" \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/backup-info-collector.sh b/deploy/mongodb/dataprotection/backup-info-collector.sh index 8322366235f..e3d2b63d4d3 100644 --- a/deploy/mongodb/dataprotection/backup-info-collector.sh +++ b/deploy/mongodb/dataprotection/backup-info-collector.sh @@ -1,6 +1,6 @@ function get_current_time() { CLIENT=`which mongosh>/dev/null&&echo mongosh||echo mongo` - curr_time=$(${CLIENT} -u ${DB_USER} -p ${DB_PASSWORD} --port 27017 --host ${DB_HOST} --authenticationDatabase admin --eval 'db.isMaster().lastWrite.lastWriteDate.getTime()/1000' --quiet) + curr_time=$(${CLIENT} -u ${DP_DB_USER} -p ${DP_DB_PASSWORD} --port 27017 --host ${DP_DB_HOST} --authenticationDatabase admin --eval 'db.isMaster().lastWrite.lastWriteDate.getTime()/1000' --quiet) curr_time=$(date -d "@${curr_time}" -u '+%Y-%m-%dT%H:%M:%SZ') echo $curr_time } @@ -11,6 +11,6 @@ function stat_and_save_backup_info() { if [ -z $STOP_TIME ]; then STOP_TIME=`get_current_time` fi - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print 
$1}') - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info + TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') + echo "{\"totalSize\":\"$TOTAL_SIZE\",\"timeRange\":{\"start\":\"${START_TIME}\",\"end\":\"${STOP_TIME}\"}}" > ${DP_BACKUP_DIR}/backup.info } \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/datafile-backup.sh b/deploy/mongodb/dataprotection/datafile-backup.sh new file mode 100644 index 00000000000..d5eee06e666 --- /dev/null +++ b/deploy/mongodb/dataprotection/datafile-backup.sh @@ -0,0 +1,10 @@ +if [ -d ${DP_BACKUP_DIR} ]; then + rm -rf ${DP_BACKUP_DIR} +fi +mkdir -p ${DP_BACKUP_DIR} && cd ${DATA_DIR} +START_TIME=`get_current_time` +# TODO: flush data and locked write, otherwise data maybe inconsistent +tar -czvf ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.tar.gz ./ +rm -rf mongodb.backup +# stat and save the backup information +stat_and_save_backup_info $START_TIME \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/datafile-restore.sh b/deploy/mongodb/dataprotection/datafile-restore.sh new file mode 100644 index 00000000000..a527c4b9528 --- /dev/null +++ b/deploy/mongodb/dataprotection/datafile-restore.sh @@ -0,0 +1,12 @@ +set -e +mkdir -p ${DATA_DIR} +res=`ls -A ${DATA_DIR}` +data_protection_file=${DATA_DIR}/.kb-data-protection +if [ ! -z "${res}" ] && [ ! -f ${data_protection_file} ]; then + echo "${DATA_DIR} is not empty! Please make sure that the directory is empty before restoring the backup." 
+ exit 1 +fi +cd ${DATA_DIR} && touch mongodb.backup +touch ${data_protection_file} +tar -xvf ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.tar.gz -C ${DATA_DIR} +rm -rf ${data_protection_file} && sync \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/mongodump-backup.sh b/deploy/mongodb/dataprotection/mongodump-backup.sh new file mode 100644 index 00000000000..0bae45552c2 --- /dev/null +++ b/deploy/mongodb/dataprotection/mongodump-backup.sh @@ -0,0 +1,12 @@ +if [ -d ${DP_BACKUP_DIR} ]; then + rm -rf ${DP_BACKUP_DIR} +fi +mkdir -p ${DP_BACKUP_DIR} + +# TODO: support endpoint env for sharding cluster. +mongo_uri="mongodb://${DP_DB_HOST}:${DP_DB_PORT}" +START_TIME=`get_current_time` +mongodump --uri ${mongo_uri} -u ${DP_DB_USER} -p ${DP_DB_PASSWORD} --authenticationDatabase admin --out ${DP_BACKUP_DIR} + +# stat and save the backup information +stat_and_save_backup_info $START_TIME \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/mongodump-restore.sh b/deploy/mongodb/dataprotection/mongodump-restore.sh new file mode 100644 index 00000000000..66ebfe5b4f2 --- /dev/null +++ b/deploy/mongodb/dataprotection/mongodump-restore.sh @@ -0,0 +1,6 @@ +mongo_uri="mongodb://${DP_DB_HOST}:${DP_DB_PORT}" +for dir_name in $(ls ${DP_BACKUP_DIR} -l | grep ^d | awk '{print $9}'); do + database_dir=${DP_BACKUP_DIR}/$dir_name + echo "INFO: restoring from ${database_dir}" + mongorestore --uri ${mongo_uri} -u ${MONGODB_ROOT_USER} -p ${MONGODB_ROOT_PASSWORD} -d $dir_name --authenticationDatabase admin ${database_dir} +done \ No newline at end of file diff --git a/deploy/mongodb/dataprotection/pitr-backup.sh b/deploy/mongodb/dataprotection/pitr-backup.sh index 211b16c0033..eb9cfd80e2d 100644 --- a/deploy/mongodb/dataprotection/pitr-backup.sh +++ b/deploy/mongodb/dataprotection/pitr-backup.sh @@ -1,12 +1,12 @@ #!/bin/bash -mkdir -p ${BACKUP_DIR} && cd ${BACKUP_DIR} +mkdir -p ${DP_BACKUP_DIR} && cd ${DP_BACKUP_DIR} # retention 8 days by default 
retention_minute="" if [ ! -z ${LOGFILE_TTL_SECOND} ];then retention_minute=$((${LOGFILE_TTL_SECOND}/60)) fi export MONGODB_URI="mongodb://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:27017/?authSource=admin" -export WALG_FILE_PREFIX=${BACKUP_DIR} +export WALG_FILE_PREFIX=${DP_BACKUP_DIR} export OPLOG_ARCHIVE_TIMEOUT_INTERVAL=${ARCHIVE_INTERVAL} export OPLOG_ARCHIVE_AFTER_SIZE=${ARCHIVE_AFTER_SIZE} retryTimes=0 @@ -45,13 +45,13 @@ check_oplog_push_process(){ } save_backup_status() { - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - OLDEST_FILE=$(ls -t ${BACKUP_DIR}/oplog_005 | tail -n 1) && OLDEST_FILE=${OLDEST_FILE#*_} && LOG_START_TIME=${OLDEST_FILE%%.*} - LATEST_FILE=$(ls -t ${BACKUP_DIR}/oplog_005 | head -n 1) && LATEST_FILE=${LATEST_FILE##*_} && LOG_STOP_TIME=${LATEST_FILE%%.*} + TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') + OLDEST_FILE=$(ls -t ${DP_BACKUP_DIR}/oplog_005 | tail -n 1) && OLDEST_FILE=${OLDEST_FILE#*_} && LOG_START_TIME=${OLDEST_FILE%%.*} + LATEST_FILE=$(ls -t ${DP_BACKUP_DIR}/oplog_005 | head -n 1) && LATEST_FILE=${LATEST_FILE##*_} && LOG_STOP_TIME=${LATEST_FILE%%.*} if [ ! 
-z $LOG_START_TIME ]; then START_TIME=$(date -d "@${LOG_START_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') STOP_TIME=$(date -d "@${LOG_STOP_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info + echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${DP_BACKUP_DIR}/backup.info fi } # purge the expired files @@ -60,8 +60,8 @@ purge_expired_files() { purgeCounter=$((purgeCounter+3)) if [ $purgeCounter -ge 60 ]; then purgeCounter=0 - fileCount=$(find ${BACKUP_DIR}/oplog_005 -mmin +${retention_minute} -name "*.lz4" | wc -l) - find ${BACKUP_DIR}/oplog_005 -mmin +${retention_minute} -name "*.lz4" -exec rm -rf {} \; + fileCount=$(find ${DP_BACKUP_DIR}/oplog_005 -mmin +${retention_minute} -name "*.lz4" | wc -l) + find ${DP_BACKUP_DIR}/oplog_005 -mmin +${retention_minute} -name "*.lz4" -exec rm -rf {} \; if [ ${fileCount} -gt 0 ]; then echo "clean up expired oplog file successfully, file count: ${fileCount}" fi @@ -75,7 +75,7 @@ trap "echo 'Terminating...' && kill $wal_g_pid" TERM while true; do check_oplog_push_process sleep 1 - if [ -d ${BACKUP_DIR}/oplog_005 ];then + if [ -d ${DP_BACKUP_DIR}/oplog_005 ];then save_backup_status # purge the expired oplog purge_expired_files diff --git a/deploy/mongodb/scripts/replicaset-setup.tpl b/deploy/mongodb/scripts/replicaset-setup.tpl index 22c4a44164e..e62c4542ddc 100644 --- a/deploy/mongodb/scripts/replicaset-setup.tpl +++ b/deploy/mongodb/scripts/replicaset-setup.tpl @@ -31,6 +31,7 @@ then $CLIENT --quiet --port $PORT_FOR_RESTORE admin --eval 'db.dropUser("root", {w: "majority", wtimeout: 4000})' || true kill $PID wait $PID + echo "INFO: restore set-up configuration successfully." 
rm $BACKUPFILE fi diff --git a/deploy/mongodb/templates/actionset-datafile.yaml b/deploy/mongodb/templates/actionset-datafile.yaml new file mode 100644 index 00000000000..3f500d26fec --- /dev/null +++ b/deploy/mongodb/templates/actionset-datafile.yaml @@ -0,0 +1,58 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: mongodb-physical-backup + labels: + clusterdefinition.kubeblocks.io/name: mongodb + {{- include "mongodb.labels" . | nindent 4 }} +spec: + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }}/db + backupType: Full + backup: + preBackup: [] + postBackup: [] + backupData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + runOnTargetPodNode: true + syncProgress: + enabled: true + intervalSeconds: 5 + command: + - bash + - -c + - | + {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 8 }} + {{- .Files.Get "dataprotection/datafile-backup.sh" | nindent 8 }} + + restore: + prepareData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + command: + - sh + - -c + - | + {{- .Files.Get "dataprotection/datafile-restore.sh" | nindent 8 }} + postReady: [] +--- +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: mongodb-volumesnapshot + labels: + clusterdefinition.kubeblocks.io/name: apecloud-mysql +spec: + backupType: Full + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }}/db + backup: {} + restore: + prepareData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + command: + - sh + - -c + - "touch ${DATA_DIR}/mongodb.backup; sync" + postReady: [] \ No newline at end of file diff --git a/deploy/mongodb/templates/actionset-dump.yaml b/deploy/mongodb/templates/actionset-dump.yaml new file mode 100644 index 00000000000..97ac4a868a8 --- /dev/null +++ 
b/deploy/mongodb/templates/actionset-dump.yaml @@ -0,0 +1,39 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: mongodb-dump + labels: + clusterdefinition.kubeblocks.io/name: mongodb + {{- include "mongodb.labels" . | nindent 4 }} +spec: + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }}/db + - name: DP_DB_PORT + value: "27017" + backupType: Full + backup: + preBackup: [] + postBackup: [] + backupData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + runOnTargetPodNode: false + syncProgress: + enabled: true + intervalSeconds: 5 + command: + - bash + - -c + - | + {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 8 }} + {{- .Files.Get "dataprotection/mongodump-backup.sh" | nindent 8 }} + restore: + postReady: + - job: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + runOnTargetPodNode: false + command: + - sh + - -c + - | + {{- .Files.Get "dataprotection/mongodump-restore.sh" | nindent 10 }} \ No newline at end of file diff --git a/deploy/mongodb/templates/backuppolicytemplate.yaml b/deploy/mongodb/templates/backuppolicytemplate.yaml index ced3c1835ed..fc5e359b374 100644 --- a/deploy/mongodb/templates/backuppolicytemplate.yaml +++ b/deploy/mongodb/templates/backuppolicytemplate.yaml @@ -9,37 +9,36 @@ spec: clusterDefinitionRef: mongodb backupPolicies: - componentDefRef: mongodb - retention: - ttl: 7d - schedule: - startingDeadlineMinutes: 120 - snapshot: - enable: false - cronExpression: "0 18 * * *" - datafile: - enable: false - cronExpression: "0 18 * * *" - logfile: - enable: false - cronExpression: "*/1 * * * *" - snapshot: - target: - role: primary - hooks: - containerName: mongodb - preCommands: - - "touch /data/mongodb/db/mongodb.backup; sync" - postCommands: - - "rm -f /data/mongodb/db/mongodb.backup; sync" - datafile: - backupToolName: mongodb-dump-tool - 
backupsHistoryLimit: 7 - target: - role: primary - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true - logfile: - backupToolName: mongodb-pitr-backup-tool - target: - role: primary \ No newline at end of file + retentionPeriod: 7d + target: + role: follower + backupMethods: + - name: datafile + snapshotVolumes: false + actionSetName: mongodb-physical-backup + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + actionSetName: mongodb-volumesnapshot + targetVolumes: + volumes: + - data + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: dump + snapshotVolumes: false + actionSetName: mongodb-dump + schedules: + - backupMethod: datafile + enabled: false + cronExpression: "0 18 * * *" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * *" + - backupMethod: dump + enabled: false + cronExpression: "0 18 * * *" \ No newline at end of file diff --git a/deploy/mongodb/templates/backuptool.yaml b/deploy/mongodb/templates/backuptool.yaml deleted file mode 100644 index 42b9a23248d..00000000000 --- a/deploy/mongodb/templates/backuptool.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: mongodb-physical-backup-tool - labels: - clusterdefinition.kubeblocks.io/name: mongodb - {{- include "mongodb.labels" . | nindent 4 }} -spec: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} - deployKind: job - env: - - name: DATA_DIR - value: /data/mongodb/db - physical: - restoreCommands: - - sh - - -c - - | - set -e - mkdir -p ${DATA_DIR} - res=`ls -A ${DATA_DIR}` - data_protection_file=${DATA_DIR}/.kb-data-protection - if [ ! -z "${res}" ] && [ ! -f ${data_protection_file} ]; then - echo "${DATA_DIR} is not empty! Please make sure that the directory is empty before restoring the backup." 
- exit 1 - fi - touch ${data_protection_file} && sync - tar -xvf ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz -C ${DATA_DIR} - rm -rf ${data_protection_file} && sync - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - bash - - -c - - | - if [ -d ${BACKUP_DIR} ]; then - rm -rf ${BACKUP_DIR} - fi - mkdir -p ${BACKUP_DIR} && cd ${DATA_DIR} - touch mongodb.backup && sync - {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 6 }} - - START_TIME=`get_current_time` - # TODO: flush data and locked write, otherwise data maybe inconsistent - tar -czvf ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz ./ - rm -rf mongodb.backup - - # stat and save the backup information - stat_and_save_backup_info $START_TIME - incrementalBackupCommands: [] diff --git a/deploy/mongodb/templates/backuptool_mongodump.yaml b/deploy/mongodb/templates/backuptool_mongodump.yaml deleted file mode 100644 index 66117013ea3..00000000000 --- a/deploy/mongodb/templates/backuptool_mongodump.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: mongodb-dump-tool - labels: - clusterdefinition.kubeblocks.io/name: mongodb - {{- include "mongodb.labels" . 
| nindent 4 }} -spec: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} - deployKind: job - env: - - name: DATA_DIR - value: /data/mongodb/db - logical: - restoreCommands: - - sh - - -c - - | - mongo_uri="mongodb://${DB_HOST}:27017" - for dir_name in $(ls ${BACKUP_DIR} -l | grep ^d | awk '{print $9}'); do - database_dir=${BACKUP_DIR}/$dir_name - echo "INFO: restoring from ${database_dir}" - mongorestore --uri ${mongo_uri} -u ${MONGODB_ROOT_USER} -p ${MONGODB_ROOT_PASSWORD} -d $dir_name --authenticationDatabase admin ${database_dir} - done - backupCommands: - - bash - - -c - - | - if [ -d ${BACKUP_DIR} ]; then - rm -rf ${BACKUP_DIR} - fi - mkdir -p ${BACKUP_DIR} - {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 6 }} - - # TODO: support endpoint env for sharding cluster. - mongo_uri="mongodb://${DB_HOST}:27017" - START_TIME=`get_current_time` - mongodump --uri ${mongo_uri} -u ${DB_USER} -p ${DB_PASSWORD} --authenticationDatabase admin --out ${BACKUP_DIR} - - # stat and save the backup information - stat_and_save_backup_info $START_TIME \ No newline at end of file diff --git a/deploy/mongodb/templates/backuptool_pitr.yaml b/deploy/mongodb/templates/backuptool_pitr.yaml deleted file mode 100644 index 04af2e45d77..00000000000 --- a/deploy/mongodb/templates/backuptool_pitr.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: mongodb-pitr-backup-tool - labels: - clusterdefinition.kubeblocks.io/name: mongodb - {{- include "mongodb.labels" . 
| nindent 4 }} -spec: - type: pitr - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.walg.repository }}:{{ .Values.walg.tag }} - deployKind: statefulSet - env: - - name: ARCHIVE_AFTER_SIZE - value: "20971520" - physical: - restoreCommands: [] - incrementalRestoreCommands: [] - logical: - podScope: ReadWrite - restoreCommands: - - bash - - -c - - | - #!/bin/bash - set -e - export MONGODB_URI="mongodb://${MONGODB_ROOT_USER}:${MONGODB_ROOT_PASSWORD}@${DB_HOST}:27017/?authSource=admin&replicaSet=${KB_CLUSTER_COMP_NAME}" - export WALG_FILE_PREFIX=${BACKUP_DIR} - echo "wal-g oplog-replay ${BASE_BACKUP_START_TIMESTAMP}.1 ${KB_RECOVERY_TIMESTAMP}.1" - wal-g oplog-replay ${BASE_BACKUP_START_TIMESTAMP}.1 ${KB_RECOVERY_TIMESTAMP}.1 - incrementalRestoreCommands: [] - backupCommands: - - bash - - -c - - | - {{- .Files.Get "dataprotection/pitr-backup.sh" | nindent 6 }} - incrementalBackupCommands: [] diff --git a/deploy/mongodb/templates/clusterdefinition.yaml b/deploy/mongodb/templates/clusterdefinition.yaml index 61838f56dfe..f4e9bee0ce4 100644 --- a/deploy/mongodb/templates/clusterdefinition.yaml +++ b/deploy/mongodb/templates/clusterdefinition.yaml @@ -109,7 +109,7 @@ spec: key: password optional: false volumeMounts: - - mountPath: /data/mongodb + - mountPath: {{ .Values.dataMountPath }} name: data - mountPath: /etc/mongodb/mongodb.conf name: mongodb-config diff --git a/deploy/mongodb/values.yaml b/deploy/mongodb/values.yaml index 103b1ad5b11..959e7e9d6e0 100644 --- a/deploy/mongodb/values.yaml +++ b/deploy/mongodb/values.yaml @@ -36,6 +36,8 @@ auth: logConfigs: running: /data/mongodb/logs/mongodb.log* +dataMountPath: /data/mongodb + metrics: image: registry: registry.cn-hangzhou.aliyuncs.com diff --git a/deploy/oracle-mysql/templates/actionset-xtrabackup.yaml b/deploy/oracle-mysql/templates/actionset-xtrabackup.yaml new file mode 100644 index 00000000000..243bb87e2e2 --- /dev/null +++ b/deploy/oracle-mysql/templates/actionset-xtrabackup.yaml @@ -0,0 
+1,49 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: oracle-mysql-xtrabackup + labels: + clusterdefinition.kubeblocks.io/name: oracle-mysql + {{- include "oracle-mysql.labels" . | nindent 4 }} +spec: + backupType: Full + env: + - name: DATA_DIR + value: /var/lib/mysql + backup: + preBackup: [] + postBackup: [] + backupData: + image: docker.io/perconalab/percona-xtrabackup:8.0.32 + runOnTargetPodNode: false + command: + - bash + - -c + - | + set -e; + mkdir -p ${DP_BACKUP_DIR}; + xtrabackup --backup --safe-slave-backup --slave-info --stream=xbstream \ + --host=${DP_DB_HOST} --user=${DP_DB_USER} --password=${DP_DB_PASSWORD} --datadir=${DATA_DIR} > ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream + TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') + echo "{\"totalSize\":\"$TOTAL_SIZE\"}" > ${DP_BACKUP_DIR}/backup.info + syncProgress: + enabled: true + intervalSeconds: 5 + restore: + prepareData: + image: docker.io/perconalab/percona-xtrabackup:8.0.32 + command: + - bash + - -c + - | + set -e; + mkdir -p ${DATA_DIR} + TMP_DIR=/data/mysql/temp + mkdir -p ${TMP_DIR} && cd ${TMP_DIR} + xbstream -x < ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream + xtrabackup --decompress --remove-original --target-dir=${TMP_DIR} + xtrabackup --prepare --target-dir=${TMP_DIR} + xtrabackup --move-back --target-dir=${TMP_DIR} --datadir=${DATA_DIR}/ + rm -rf ${TMP_DIR} + chmod -R 0777 ${DATA_DIR} + postReady: [] \ No newline at end of file diff --git a/deploy/oracle-mysql/templates/backuppolicytemplate.yaml b/deploy/oracle-mysql/templates/backuppolicytemplate.yaml index 2543ea7bd4b..680a6a201fe 100644 --- a/deploy/oracle-mysql/templates/backuppolicytemplate.yaml +++ b/deploy/oracle-mysql/templates/backuppolicytemplate.yaml @@ -9,14 +9,24 @@ spec: clusterDefinitionRef: oracle-mysql backupPolicies: - componentDefRef: mysql-compdef - schedule: - snapshot: - enable: true - cronExpression: "0 18 * * *" - datafile: - enable: false - cronExpression: "0 18 * * 
*" - snapshot: - backupsHistoryLimit: 5 - datafile: - backupToolName: oracle-mysql-xtrabackup \ No newline at end of file + retentionPeriod: 7d + backupMethods: + - name: xtrabackup + snapshotVolumes: false + actionSetName: oracle-mysql-xtrabackup + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: datafile + enabled: false + cronExpression: "0 18 * * 0" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * 0" \ No newline at end of file diff --git a/deploy/oracle-mysql/templates/backuptool.yaml b/deploy/oracle-mysql/templates/backuptool.yaml deleted file mode 100644 index 13672fcf4c8..00000000000 --- a/deploy/oracle-mysql/templates/backuptool.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: oracle-mysql-xtrabackup - labels: - clusterdefinition.kubeblocks.io/name: oracle-mysql - {{- include "oracle-mysql.labels" . 
| nindent 4 }} -spec: - image: docker.io/perconalab/percona-xtrabackup:8.0.32 - deployKind: job - env: - - name: DATA_DIR - value: /var/lib/mysql - physical: - restoreCommands: - - sh - - -c - - | - set -e; - mkdir -p ${DATA_DIR} - TMP_DIR=/data/mysql/temp - mkdir -p ${TMP_DIR} && cd ${TMP_DIR} - xbstream -x < ${BACKUP_DIR}/${BACKUP_NAME}.xbstream - xtrabackup --decompress --remove-original --target-dir=${TMP_DIR} - xtrabackup --prepare --target-dir=${TMP_DIR} - xtrabackup --move-back --target-dir=${TMP_DIR} --datadir=${DATA_DIR}/ - rm -rf ${TMP_DIR} - chmod -R 0777 ${DATA_DIR} - backupCommands: - - sh - - -c - - | - set -e; - mkdir -p ${BACKUP_DIR}; - xtrabackup --backup --safe-slave-backup --slave-info --stream=xbstream \ - --host=${DB_HOST} --user=${DB_USER} --password=${DB_PASSWORD} --datadir=${DATA_DIR} > ${BACKUP_DIR}/${BACKUP_NAME}.xbstream diff --git a/deploy/oracle-mysql/templates/clusterdefinition.yaml b/deploy/oracle-mysql/templates/clusterdefinition.yaml index 927d25e7b69..24a1d016b1b 100644 --- a/deploy/oracle-mysql/templates/clusterdefinition.yaml +++ b/deploy/oracle-mysql/templates/clusterdefinition.yaml @@ -40,7 +40,7 @@ spec: - name: mysql-container imagePullPolicy: IfNotPresent volumeMounts: - - mountPath: /var/lib/mysql + - mountPath: {{ .Values.dataMountPath }} name: data - mountPath: /etc/mysql/conf.d name: configs diff --git a/deploy/oracle-mysql/values.yaml b/deploy/oracle-mysql/values.yaml index 032a62fc03e..d960cd7c9ec 100644 --- a/deploy/oracle-mysql/values.yaml +++ b/deploy/oracle-mysql/values.yaml @@ -33,3 +33,5 @@ nameOverride: "" fullnameOverride: "" ## MySQl ClusterVersion clusterVersionOverride: "8.0.32" + +dataMountPath: /var/lib/mysql diff --git a/deploy/postgresql/dataprotection/backup-info-collector.sh b/deploy/postgresql/dataprotection/backup-info-collector.sh index 8844dd80ec1..2324b0b2a1d 100644 --- a/deploy/postgresql/dataprotection/backup-info-collector.sh +++ b/deploy/postgresql/dataprotection/backup-info-collector.sh @@ 
-1,5 +1,5 @@ function get_current_time() { - curr_time=$(psql -U ${DB_USER} -h ${DB_HOST} -d postgres -t -c "SELECT now() AT TIME ZONE 'UTC'") + curr_time=$(psql -U ${DP_DB_USER} -h ${DP_DB_HOST} -d postgres -t -c "SELECT now() AT TIME ZONE 'UTC'") echo $curr_time } @@ -11,6 +11,6 @@ function stat_and_save_backup_info() { fi START_TIME=$(date -d "${START_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') STOP_TIME=$(date -d "${STOP_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupLog\":{\"startTime\":\"${START_TIME}\",\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info + TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') + echo "{\"totalSize\":\"$TOTAL_SIZE\",\"timeRange\":{\"start\":\"${START_TIME}\",\"end\":\"${STOP_TIME}\"}}" > ${DP_BACKUP_DIR}/backup.info } \ No newline at end of file diff --git a/deploy/postgresql/dataprotection/fetch-wal-log.sh b/deploy/postgresql/dataprotection/fetch-wal-log.sh deleted file mode 100644 index a89b0050c76..00000000000 --- a/deploy/postgresql/dataprotection/fetch-wal-log.sh +++ /dev/null @@ -1,57 +0,0 @@ -function fetch-wal-log(){ - backup_log_dir=$1 - wal_destination_dir=$2 - start_wal_name=$3 - restore_time=`date -d "$4" +%s` - pitr=$5 - echo "PITR: $pitr" - - if [[ ! 
-d ${backup_log_dir} ]]; then - echo "ERROR: ${backup_log_dir} not exists" - exit 1 - fi - - exit_fetch_wal=0 && mkdir -p $wal_destination_dir - for dir_name in $(ls ${backup_log_dir} -l | grep ^d | awk '{print $9}' | sort); do - if [[ $exit_fetch_wal -eq 1 ]]; then - exit 0 - fi - - cd ${backup_log_dir}/${dir_name} - # check if the latest_wal_log after the start_wal_log - latest_wal=$(ls | sort | tail -n 1) - if [[ $latest_wal < $start_wal_name ]]; then - continue - fi - - echo "INFO: start to fetch wal logs from ${backup_log_dir}/${dir_name}" - for file in $(ls | sort | grep ".gz"); do - if [[ $file < $start_wal_name ]]; then - continue - fi - if [[ $pitr != "true" && $file =~ ".history" ]]; then - # if not restored for pitr, only fetch the current timeline log - echo "INFO: exit for new timeline." - exit_fetch_wal=1 - break - fi - - if [ ! -f $file ]; then - echo "ERROR: $file was deleted during fetching the wal log. Please try again!" - exit 1 - fi - wal_name=${file%.*} - echo "INFO: copying $wal_name" - gunzip -c $file > ${wal_destination_dir}/$wal_name - - # check if the wal_log contains the restore_time logs. if ture, stop fetching - latest_commit_time=$(pg_waldump ${wal_destination_dir}/$wal_name --rmgr=Transaction 2>/dev/null |tail -n 1|awk -F ' COMMIT ' '{print $2}'|awk -F ';' '{print $1}') - timestamp=`date -d "$latest_commit_time" +%s` - if [[ $latest_commit_time != "" && $timestamp > $restore_time ]]; then - echo "INFO: exit when reaching the target time log." 
- exit_fetch_wal=1 - break - fi - done - done -} \ No newline at end of file diff --git a/deploy/postgresql/dataprotection/pg-basebackup-backup.sh b/deploy/postgresql/dataprotection/pg-basebackup-backup.sh new file mode 100644 index 00000000000..0fb6296841a --- /dev/null +++ b/deploy/postgresql/dataprotection/pg-basebackup-backup.sh @@ -0,0 +1,12 @@ +set -e; +if [ -d ${DP_BACKUP_DIR} ]; then + rm -rf ${DP_BACKUP_DIR} +fi +mkdir -p ${DP_BACKUP_DIR}; +export PGPASSWORD=${DP_DB_PASSWORD} + +START_TIME=`get_current_time` +echo ${DP_DB_PASSWORD} | pg_basebackup -Ft -Pv -c fast -Xs -Z${COMPRESS_LEVEL} -D ${DP_BACKUP_DIR} -h ${DP_DB_HOST} -U ${DP_DB_USER} -W; + +# stat and save the backup information +stat_and_save_backup_info $START_TIME \ No newline at end of file diff --git a/deploy/postgresql/dataprotection/pg-basebackup-restore.sh b/deploy/postgresql/dataprotection/pg-basebackup-restore.sh new file mode 100644 index 00000000000..1be2f6ec70a --- /dev/null +++ b/deploy/postgresql/dataprotection/pg-basebackup-restore.sh @@ -0,0 +1,15 @@ +set -e; +cd ${DP_BACKUP_DIR}; +mkdir -p ${DATA_DIR}; +# compatible with gzip compression +if [ -f base.tar.gz ];then + tar -xvf base.tar.gz -C ${DATA_DIR}/; +else + tar -xvf base.tar -C ${DATA_DIR}/; +fi +if [ -f pg_wal.tar.gz ];then + tar -xvf pg_wal.tar.gz -C ${DATA_DIR}/pg_wal/; +else + tar -xvf pg_wal.tar -C ${DATA_DIR}/pg_wal/; +fi +echo "done!"; \ No newline at end of file diff --git a/deploy/postgresql/dataprotection/pitr-backup.sh b/deploy/postgresql/dataprotection/pitr-backup.sh deleted file mode 100644 index c882af2cf55..00000000000 --- a/deploy/postgresql/dataprotection/pitr-backup.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -export PGPASSWORD=${DB_PASSWORD} -PSQL="psql -h ${DB_HOST} -U ${DB_USER} -d postgres" -last_switch_wal_time=$(date +%s) -last_purge_time=$(date +%s) -STOP_TIME= -switch_wal_interval=300 - -if [[ ${SWITCH_WAL_INTERVAL_SECONDS} =~ ^[0-9]+$ ]];then - switch_wal_interval=${SWITCH_WAL_INTERVAL_SECONDS} 
-fi - -backup_in_secondary= -if [ "${DP_POD_ROLE}" == "primary" ]; then - backup_in_secondary=f -elif [ "${DP_POD_ROLE}" == "secondary" ]; then - backup_in_secondary=t -fi - -function log() { - msg=$1 - local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S') - echo "${curr_date} INFO: $msg" -} - -function purge_expired_files() { - # clean up expired logfiles, interval is 60s - local curr_time=$(date +%s) - local diff_time=$((${curr_time}-${last_purge_time})) - if [[ -z ${LOGFILE_TTL_SECOND} || ${diff_time} -lt 60 ]]; then - return - fi - retention_day=$((${LOGFILE_TTL_SECOND}/86400)) - EXPIRED_INCR_LOG=${BACKUP_DIR}/$(date -d"${retention_day} day ago" +%Y%m%d); - if [ -d ${EXPIRED_INCR_LOG} ]; then - rm -rf ${EXPIRED_INCR_LOG}; - fi - last_purge_time=${curr_time} -} - -function switch_wal_log() { - local curr_time=$(date +%s) - local diff_time=$((${curr_time}-${last_switch_wal_time})) - if [[ ${diff_time} -lt ${switch_wal_interval} ]]; then - return - fi - LAST_TRANS=$(pg_waldump $(${PSQL} -Atc "select pg_walfile_name(pg_current_wal_lsn())") --rmgr=Transaction 2>/dev/null |tail -n 1) - if [ "${LAST_TRANS}" != "" ] && [ "$(find ${LOG_DIR}/archive_status/ -name '*.ready')" = "" ]; then - log "start to switch wal file" - ${PSQL} -c "select pg_switch_wal()" - for i in $(seq 1 60); do - if [ "$(find ${LOG_DIR}/archive_status/ -name '*.ready')" != "" ]; then - log "switch wal file successfully" - break; - fi - sleep 1 - done - fi - last_switch_wal_time=${curr_time} -} - -function upload_wal_log() { - TODAY_INCR_LOG=${BACKUP_DIR}/$(date +%Y%m%d); - mkdir -p ${TODAY_INCR_LOG}; - cd ${LOG_DIR} - for i in $(ls -tr ./archive_status/ | grep .ready); do - wal_name=${i%.*} - LOG_STOP_TIME=$(pg_waldump ${wal_name} --rmgr=Transaction 2>/dev/null |tail -n 1|awk -F ' COMMIT ' '{print $2}'|awk -F ';' '{print $1}') - if [[ ! 
-z $LOG_STOP_TIME ]];then - STOP_TIME=$(date -d "${LOG_STOP_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ') - fi - if [ -f ${wal_name} ]; then - log "upload ${wal_name}" - gzip -kqc ${wal_name} > ${TODAY_INCR_LOG}/${wal_name}.gz; - mv -f ./archive_status/${i} ./archive_status/${wal_name}.done; - fi - done -} - -function save_backup_status() { - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - if [[ -z ${STOP_TIME} ]];then - echo "{\"totalSize\":\"${TOTAL_SIZE}\",\"manifests\":{\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info - else - echo "{\"totalSize\":\"${TOTAL_SIZE}\",\"manifests\":{\"backupLog\":{\"stopTime\":\"${STOP_TIME}\"},\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info - fi -} - -function check_pg_process() { - is_ok=false - for ((i=1;i<4;i++));do - is_secondary=$(${PSQL} -Atc "select pg_is_in_recovery()") - if [[ $? -eq 0 && (-z ${backup_in_secondary} || "${backup_in_secondary}" == "${is_secondary}") ]]; then - is_ok=true - break - fi - echo "Warning: target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${DP_POD_ROLE}, pg_is_in_recovery: ${is_secondary}, retry detection!" - sleep 1 - done - if [[ ${is_ok} == "false" ]];then - echo "ERROR: target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${DP_POD_ROLE}, pg_is_in_recovery: ${is_secondary}!" - exit 1 - fi -} - -# trap term signal -trap "echo 'Terminating...' 
&& sync && exit 0" TERM -log "start to archive wal logs" -while true; do - - # check if pg process is ok - check_pg_process - - # switch wal log - switch_wal_log - - # upload wal log - upload_wal_log - - # save backup status which will be updated to `backup` CR by the sidecar - save_backup_status - - # purge the expired wal logs - purge_expired_files - sleep ${DP_INTERVAL_SECONDS} -done \ No newline at end of file diff --git a/deploy/postgresql/dataprotection/pitr-restore.sh b/deploy/postgresql/dataprotection/pitr-restore.sh deleted file mode 100644 index 0bf4ed28a2a..00000000000 --- a/deploy/postgresql/dataprotection/pitr-restore.sh +++ /dev/null @@ -1,30 +0,0 @@ - -if [ -d ${DATA_DIR}.old ]; - then echo "${DATA_DIR}.old directory already exists, skip restore."; - exit 0; -fi - -mkdir -p ${PITR_DIR}; - -latest_wal=$(ls ${DATA_DIR}/pg_wal -lI "*.history" | grep ^- | awk '{print $9}' | sort | tail -n 1) -start_wal_log=`basename $latest_wal` - -echo "fetch-wal-log ${BACKUP_DIR} ${PITR_DIR} ${start_wal_log} \"${KB_RECOVERY_TIME}\" true" -fetch-wal-log ${BACKUP_DIR} ${PITR_DIR} ${start_wal_log} "${KB_RECOVERY_TIME}" true - -chmod 777 -R ${PITR_DIR}; -touch ${DATA_DIR}/recovery.signal; -mkdir -p ${CONF_DIR}; -chmod 777 -R ${CONF_DIR}; -mkdir -p ${RESTORE_SCRIPT_DIR}; -echo "#!/bin/bash" > ${RESTORE_SCRIPT_DIR}/kb_restore.sh; -echo "[[ -d '${DATA_DIR}.old' ]] && mv -f ${DATA_DIR}.old/* ${DATA_DIR}/;" >> ${RESTORE_SCRIPT_DIR}/kb_restore.sh; -echo "sync;" >> ${RESTORE_SCRIPT_DIR}/kb_restore.sh; -chmod +x ${RESTORE_SCRIPT_DIR}/kb_restore.sh; -echo "restore_command='case "%f" in *history) cp ${PITR_DIR}/%f %p ;; *) mv ${PITR_DIR}/%f %p ;; esac'" > ${CONF_DIR}/recovery.conf; -echo "recovery_target_time='${KB_RECOVERY_TIME}'" >> ${CONF_DIR}/recovery.conf; -echo "recovery_target_action='promote'" >> ${CONF_DIR}/recovery.conf; -echo "recovery_target_timeline='latest'" >> ${CONF_DIR}/recovery.conf; -mv ${DATA_DIR} ${DATA_DIR}.old; -echo "done."; -sync; \ No newline at end of file 
diff --git a/deploy/postgresql/templates/actionset-pgbasebackup.yaml b/deploy/postgresql/templates/actionset-pgbasebackup.yaml new file mode 100644 index 00000000000..94e8904d0da --- /dev/null +++ b/deploy/postgresql/templates/actionset-pgbasebackup.yaml @@ -0,0 +1,38 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: postgres-basebackup + labels: + clusterdefinition.kubeblocks.io/name: postgresql + {{- include "postgresql.labels" . | nindent 4 }} +spec: + backupType: Full + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }}/pgroot/data + - name: COMPRESS_LEVEL + value: "0" + backup: + preBackup: [] + postBackup: [] + backupData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + runOnTargetPodNode: false + command: + - bash + - -c + - | + {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 8 }} + {{- .Files.Get "dataprotection/pg-basebackup-backup.sh" | nindent 8 }} + syncProgress: + enabled: true + intervalSeconds: 5 + restore: + prepareData: + image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + command: + - bash + - -c + - | + {{- .Files.Get "dataprotection/pg-basebackup-restore.sh" | nindent 8 }} + postReady: [] diff --git a/deploy/postgresql/templates/backuppolicytemplate.yaml b/deploy/postgresql/templates/backuppolicytemplate.yaml index ed9c871ce7b..dc0467666bf 100644 --- a/deploy/postgresql/templates/backuppolicytemplate.yaml +++ b/deploy/postgresql/templates/backuppolicytemplate.yaml @@ -5,53 +5,30 @@ metadata: labels: clusterdefinition.kubeblocks.io/name: postgresql {{- include "postgresql.labels" . 
| nindent 4 }} - annotations: - dataprotection.kubeblocks.io/reconfigure-ref: | - { - "name": "postgresql-configuration", - "key": "postgresql.conf", - "enable": { - "logfile": [{"key": "archive_command","value": "''"}] - }, - "disable": { - "logfile": [{ "key": "archive_command","value": "'/bin/true'"}] - } - } spec: clusterDefinitionRef: postgresql backupPolicies: - componentDefRef: postgresql - retention: - ttl: 7d - schedule: - startingDeadlineMinutes: 120 - snapshot: - enable: false - cronExpression: "0 18 * * *" - datafile: - enable: false - cronExpression: "0 18 * * *" - logfile: - enable: false - cronExpression: "*/2 * * * *" - snapshot: - target: - connectionCredentialKey: - passwordKey: password - usernameKey: username - hooks: - containerName: postgresql - preCommands: - - psql -c "CHECKPOINT;" - datafile: - backupToolName: postgres-basebackup - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true - logfile: - backupToolName: postgres-pitr - target: - role: primary - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true \ No newline at end of file + retentionPeriod: 7d + target: + role: secondary + backupMethods: + - name: pg-basebackup + snapshotVolumes: false + actionSetName: postgres-basebackup + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: pg-basebackup + enabled: false + cronExpression: "0 18 * * *" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * *" \ No newline at end of file diff --git a/deploy/postgresql/templates/backuptool-pgbasebackup.yaml b/deploy/postgresql/templates/backuptool-pgbasebackup.yaml deleted file mode 100644 index 1b46757693b..00000000000 --- a/deploy/postgresql/templates/backuptool-pgbasebackup.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: 
BackupTool -metadata: - name: postgres-basebackup - labels: - clusterdefinition.kubeblocks.io/name: postgresql - {{- include "postgresql.labels" . | nindent 4 }} -spec: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} - deployKind: job - env: - - name: DATA_DIR - value: /home/postgres/pgdata/pgroot/data - physical: - restoreCommands: - - sh - - -c - - | - #!/bin/sh - set -e; - cd ${BACKUP_DIR}; - mkdir -p ${DATA_DIR}; - # compatible with gzip compression for version 0.5.0 - if [ -f base.tar.gz ];then - tar -xvf base.tar.gz -C ${DATA_DIR}/; - else - tar -xvf base.tar -C ${DATA_DIR}/; - fi - if [ -f pg_wal.tar.gz ];then - tar -xvf pg_wal.tar.gz -C ${DATA_DIR}/pg_wal/; - else - tar -xvf pg_wal.tar -C ${DATA_DIR}/pg_wal/; - fi - echo "done!"; - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - bash - - -c - - | - set -e; - if [ -d ${BACKUP_DIR} ]; then - rm -rf ${BACKUP_DIR} - fi - mkdir -p ${BACKUP_DIR}; - export PGPASSWORD=${DB_PASSWORD} - {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 6 }} - - START_TIME=`get_current_time` - echo ${DB_PASSWORD} | pg_basebackup -Ft -Pv -c fast -Xs -D ${BACKUP_DIR} -h ${DB_HOST} -U ${DB_USER} -W; - - # stat and save the backup information - stat_and_save_backup_info $START_TIME - incrementalBackupCommands: [] diff --git a/deploy/postgresql/templates/backuptool-pitr.yaml b/deploy/postgresql/templates/backuptool-pitr.yaml deleted file mode 100644 index 05474e68360..00000000000 --- a/deploy/postgresql/templates/backuptool-pitr.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - labels: - clusterdefinition.kubeblocks.io/name: postgresql - kubeblocks.io/backup-tool-type: pitr - {{- include "postgresql.labels" . 
| nindent 4 }} - name: postgres-pitr -spec: - deployKind: statefulSet - env: - - name: VOLUME_DATA_DIR - value: /home/postgres/pgdata - - name: RESTORE_SCRIPT_DIR - value: "$(VOLUME_DATA_DIR)/kb_restore" - - name: PITR_DIR - value: "$(VOLUME_DATA_DIR)/pitr" - - name: DATA_DIR - value: "$(VOLUME_DATA_DIR)/pgroot/data" - - name: CONF_DIR - value: "$(VOLUME_DATA_DIR)/conf" - - name: TIME_FORMAT - value: 2006-01-02 15:04:05 MST - - name: LOG_DIR - value: $(VOLUME_DATA_DIR)/pgroot/data/pg_wal - - name: DP_POD_ROLE - # TODO input by backup policy - value: primary - - name: DP_INTERVAL_SECONDS - value: "10" - - name: SWITCH_WAL_INTERVAL_SECONDS - value: "600" - image: "" - logical: - restoreCommands: - - sh - - -c - - | - set -e; - rm -f ${CONF_DIR}/recovery.conf; - rm -rf ${PITR_DIR}; - physical: - restoreCommands: - - bash - - -c - - | - #!/bin/bash - set -e; - {{- .Files.Get "dataprotection/fetch-wal-log.sh" | nindent 8 }} - {{- .Files.Get "dataprotection/pitr-restore.sh" | nindent 8 }} - backupCommands: - - bash - - -c - - | - {{- .Files.Get "dataprotection/pitr-backup.sh" | nindent 6 }} - type: pitr \ No newline at end of file diff --git a/deploy/postgresql/templates/backuptool-wal-g.yaml b/deploy/postgresql/templates/backuptool-wal-g.yaml deleted file mode 100644 index 1466406bd16..00000000000 --- a/deploy/postgresql/templates/backuptool-wal-g.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: postgres-wal-g - labels: - clusterdefinition.kubeblocks.io/name: postgresql -spec: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} - deployKind: job - env: - - name: DATA_DIR - value: /home/postgres/pgdata/pgroot/data - - name: WAL_DIR - value: $(DATA_DIR)/pg_wal - - name: WALG_PG_WAL_SIZE - value: "16" - - name: WALG_TAR_SIZE_THRESHOLD - value: "4294967296" - - name: WALG_UPLOAD_DISK_CONCURRENCY - value: "8" - physical: - relyOnLogfile: 
true - restoreCommands: - - bash - - -c - - | - #!/bin/bash - # NOTE: this basebackup is only supported when pitr is enabled and rely on archive logs. - # if archive logs are deleted, it will cause the recovery failed from this backup. - set -e; - - {{- .Files.Get "dataprotection/fetch-wal-log.sh" | nindent 8 }} - - # fetch base backup - mkdir -p ${DATA_DIR}; - WALG_FILE_PREFIX=${BACKUP_DIR} wal-g backup-fetch ${DATA_DIR} LATEST - - if [[ ! -z ${BACKUP_LOGFILE_DIR} ]]; then - # get start wal log - start_wal_location=$(cat ${DATA_DIR}/backup_label | grep "START WAL LOCATION") - start_wal_log=${start_wal_location#*file } && start_wal_log=${start_wal_log/)/} - - # fetch wal logs from archive dir - echo "fetch-wal-log ${BACKUP_LOGFILE_DIR} ${WAL_DIR} ${start_wal_log} \"${BACKUP_STOP_TIME}\" false" - fetch-wal-log ${BACKUP_LOGFILE_DIR} ${WAL_DIR} ${start_wal_log} "${BACKUP_STOP_TIME}" false - fi - echo "done!"; - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - bash - - -c - - |- - set -e; - if [ -d ${BACKUP_DIR} ]; then - rm -rf ${BACKUP_DIR} - fi - mkdir -p ${BACKUP_DIR} - export PGPASSWORD=${DB_PASSWORD} - {{- .Files.Get "dataprotection/backup-info-collector.sh" | nindent 6 }} - - START_TIME=`get_current_time` - WALG_FILE_PREFIX=${BACKUP_DIR} PGHOST=${DB_HOST} PGUSER=${DB_USER} PGPASSWORD=${DB_PASSWORD} PGPORT=5432 wal-g backup-push ${DATA_DIR} - - STOP_TIME="" - stop_sentinel_file=$(find ${BACKUP_DIR}/basebackups_005 -name "*backup_stop_sentinel.json") - if [ -f $stop_sentinel_file ];then - result_json=$(cat $stop_sentinel_file) - STOP_TIME=$(echo $result_json | jq -r ".FinishTime") - START_TIME=$(echo $result_json | jq -r ".StartTime") - fi - # stat and save the backup information - stat_and_save_backup_info $START_TIME $STOP_TIME - incrementalBackupCommands: [] diff --git a/deploy/postgresql/templates/clusterdefinition.yaml b/deploy/postgresql/templates/clusterdefinition.yaml index 
5dff801861a..6543c773067 100644 --- a/deploy/postgresql/templates/clusterdefinition.yaml +++ b/deploy/postgresql/templates/clusterdefinition.yaml @@ -113,7 +113,7 @@ spec: - /kb-scripts/init_container.sh volumeMounts: - name: data - mountPath: /home/postgres/pgdata + mountPath: {{ .Values.dataMountPath }} - name: postgresql-config mountPath: /home/postgres/conf - name: scripts @@ -145,7 +145,7 @@ spec: - name: dshm mountPath: /dev/shm - name: data - mountPath: /home/postgres/pgdata + mountPath: {{ .Values.dataMountPath }} - name: postgresql-config mountPath: /home/postgres/conf - name: scripts @@ -171,7 +171,7 @@ spec: - name: KUBERNETES_LABELS value: '{"app.kubernetes.io/instance":"$(KB_CLUSTER_NAME)","apps.kubeblocks.io/component-name":"$(KB_COMP_NAME)"}' - name: RESTORE_DATA_DIR - value: /home/postgres/pgdata/kb_restore + value: {{ .Values.dataMountPath }}/kb_restore - name: KB_PG_CONFIG_PATH value: /home/postgres/conf/postgresql.conf - name: SPILO_CONFIGURATION @@ -183,7 +183,7 @@ spec: - name: ALLOW_NOSSL value: "true" - name: PGROOT - value: /home/postgres/pgdata/pgroot + value: {{ .Values.dataMountPath }}/pgroot - name: POD_IP valueFrom: fieldRef: diff --git a/deploy/postgresql/values.yaml b/deploy/postgresql/values.yaml index acc91791a7a..86a01ebc24b 100644 --- a/deploy/postgresql/values.yaml +++ b/deploy/postgresql/values.yaml @@ -111,5 +111,7 @@ pgbouncer: tag: 1.19.0 pullPolicy: IfNotPresent +dataMountPath: /home/postgres/pgdata + logConfigs: running: /home/postgres/pgdata/pgroot/data/log/postgresql-* \ No newline at end of file diff --git a/deploy/qdrant/scripts/qdrant-backup.sh b/deploy/qdrant/scripts/qdrant-backup.sh index 45d8766182b..a8e5da4be97 100644 --- a/deploy/qdrant/scripts/qdrant-backup.sh +++ b/deploy/qdrant/scripts/qdrant-backup.sh @@ -1,17 +1,20 @@ #!/usr/bin/env bash set -e -mkdir -p ${BACKUP_DIR} -endpoint=http://${DB_HOST}:6333 +mkdir -p ${DP_BACKUP_DIR} +endpoint=http://${DP_DB_HOST}:6333 snapshot=`curl -XPOST ${endpoint}/snapshots` 
status=`echo ${snapshot} | jq '.status'` -if [ "${status}" != "ok" ]; then +if [ "${status}" != "ok" ] && [ "${status}" != "\"ok\"" ]; then echo "backup failed, status: ${status}" exit 1 fi name=`echo ${snapshot} | jq '.result.name'` -curl ${endpoint}/snapshots/${name} --output ${BACKUP_DIR}/${BACKUP_NAME}.snapshot +curl ${endpoint}/snapshots/${name} --output ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.snapshot -curl -XDELETE ${endpoint}/snapshots/${name} \ No newline at end of file +curl -XDELETE ${endpoint}/snapshots/${name} + +TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') +echo "{\"totalSize\":\"$TOTAL_SIZE\"}" > ${DP_BACKUP_DIR}/backup.info \ No newline at end of file diff --git a/deploy/qdrant/scripts/qdrant-restore.sh b/deploy/qdrant/scripts/qdrant-restore.sh index bb7d6ecd909..fc0d3007cee 100644 --- a/deploy/qdrant/scripts/qdrant-restore.sh +++ b/deploy/qdrant/scripts/qdrant-restore.sh @@ -9,7 +9,7 @@ if [ ! -z "${res}" ]; then fi # start qdrant restore process -qdrant --storage-snapshot ${BACKUP_DIR}/${BACKUP_NAME} --config-path /qdrant/config/config.yaml --force-snapshot --uri http://localhost:6333 & +qdrant --storage-snapshot ${DP_BACKUP_DIR}/${DP_BACKUP_NAME} --config-path /qdrant/config/config.yaml --force-snapshot --uri http://localhost:6333 & # wait until restore finished until curl http://localhost:6333/cluster; do sleep 1; done diff --git a/deploy/qdrant/templates/actionset-datafile.yaml b/deploy/qdrant/templates/actionset-datafile.yaml new file mode 100644 index 00000000000..6ea586031b4 --- /dev/null +++ b/deploy/qdrant/templates/actionset-datafile.yaml @@ -0,0 +1,35 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: qdrant-snapshot + labels: + clusterdefinition.kubeblocks.io/name: qdrant + {{- include "qdrant.labels" . 
| nindent 4 }} +spec: + backupType: Full + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }}/ + backup: + preBackup: [] + postBackup: [] + backupData: + image: apecloud/curl-jq:latest + runOnTargetPodNode: false + command: + - sh + - -c + - | + {{- .Files.Get "scripts/qdrant-backup.sh" | nindent 8 }} + syncProgress: + enabled: true + intervalSeconds: 5 + restore: + prepareData: + image: apecloud/curl-jq:latest + command: + - sh + - -c + - | + {{- .Files.Get "scripts/qdrant-restore.sh" | nindent 8 }} + postReady: [] \ No newline at end of file diff --git a/deploy/qdrant/templates/backuppolicytemplate.yaml b/deploy/qdrant/templates/backuppolicytemplate.yaml index f3501dda115..5b58864a1dc 100644 --- a/deploy/qdrant/templates/backuppolicytemplate.yaml +++ b/deploy/qdrant/templates/backuppolicytemplate.yaml @@ -9,16 +9,24 @@ spec: clusterDefinitionRef: qdrant backupPolicies: - componentDefRef: qdrant - retention: - ttl: 7d - schedule: - snapshot: - enable: false - cronExpression: "0 18 * * 0" - snapshot: - target: - connectionCredentialKey: - passwordKey: password - usernameKey: username - datafile: - backupToolName: qdrant-snapshot \ No newline at end of file + retentionPeriod: 7d + backupMethods: + - name: datafile + snapshotVolumes: false + actionSetName: qdrant-snapshot + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: datafile + enabled: false + cronExpression: "0 18 * * 0" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * 0" \ No newline at end of file diff --git a/deploy/qdrant/templates/backuptool.yaml b/deploy/qdrant/templates/backuptool.yaml deleted file mode 100644 index 878065bf496..00000000000 --- a/deploy/qdrant/templates/backuptool.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: 
qdrant-snapshot - labels: - clusterdefinition.kubeblocks.io/name: qdrant - {{- include "qdrant.labels" . | nindent 4 }} -spec: - image: apecloud/curl-jq:latest - deployKind: job - env: - - name: DATA_DIR - value: /qdrant/storage/ - physical: - restoreCommands: - - |- - {{- .Files.Get "scripts/qdrant-restore.sh" | nindent 8 }} - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - |- - {{- .Files.Get "scripts/qdrant-backup.sh" | nindent 6 }} - incrementalBackupCommands: [] diff --git a/deploy/qdrant/templates/clusterdefinition.yaml b/deploy/qdrant/templates/clusterdefinition.yaml index 418f7dcbc01..b613df43501 100644 --- a/deploy/qdrant/templates/clusterdefinition.yaml +++ b/deploy/qdrant/templates/clusterdefinition.yaml @@ -130,7 +130,7 @@ spec: volumeMounts: - mountPath: /qdrant/config/ name: qdrant-config - - mountPath: /qdrant/storage + - mountPath: {{ .Values.dataMountPath }} name: data - mountPath: /qdrant/scripts name: scripts diff --git a/deploy/qdrant/values.yaml b/deploy/qdrant/values.yaml index 118b1cd1be0..82bb5236a27 100644 --- a/deploy/qdrant/values.yaml +++ b/deploy/qdrant/values.yaml @@ -28,4 +28,6 @@ images: ## @param debugEnabled enables containers' debug logging ## -debugEnabled: true \ No newline at end of file +debugEnabled: true + +dataMountPath: /qdrant/storage \ No newline at end of file diff --git a/deploy/redis/dataprotection/backup.sh b/deploy/redis/dataprotection/backup.sh new file mode 100644 index 00000000000..a60057b4a7e --- /dev/null +++ b/deploy/redis/dataprotection/backup.sh @@ -0,0 +1,19 @@ +set -e +connect_url="redis-cli -h ${DP_DB_HOST} -p ${DP_DB_PORT} -a ${DP_DB_PASSWORD}" +last_save=$(${connect_url} LASTSAVE) +echo "INFO: start BGSAVE" +${connect_url} BGSAVE +echo "INFO: wait for saving rdb successfully" +while true; do + end_save=$(${connect_url} LASTSAVE) + if [ $end_save -ne $last_save ];then + break + fi + sleep 1 +done +echo "INFO: start to save data 
file..." +mkdir -p ${DP_BACKUP_DIR} && cd ${DATA_DIR} +tar -czvf ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.tar.gz ./ +echo "INFO: save data file successfully" +TOTAL_SIZE=$(du -shx ${DP_BACKUP_DIR}|awk '{print $1}') +echo "{\"totalSize\":\"$TOTAL_SIZE\"}" > ${DP_BACKUP_DIR}/backup.info && sync \ No newline at end of file diff --git a/deploy/redis/dataprotection/restore.sh b/deploy/redis/dataprotection/restore.sh new file mode 100644 index 00000000000..c1e72b0f2ed --- /dev/null +++ b/deploy/redis/dataprotection/restore.sh @@ -0,0 +1,12 @@ +set -e +mkdir -p ${DATA_DIR} +res=`find ${DATA_DIR} -type f` +data_protection_file=${DATA_DIR}/.kb-data-protection +if [ ! -z "${res}" ] && [ ! -f ${data_protection_file} ]; then + echo "${DATA_DIR} is not empty! Please make sure that the directory is empty before restoring the backup." + exit 1 +fi +# touch placeholder file +touch ${data_protection_file} +tar -xvf ${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.tar.gz -C ${DATA_DIR} +rm -rf ${data_protection_file} && sync \ No newline at end of file diff --git a/deploy/redis/templates/backupactionset.yaml b/deploy/redis/templates/backupactionset.yaml new file mode 100644 index 00000000000..f406fa92461 --- /dev/null +++ b/deploy/redis/templates/backupactionset.yaml @@ -0,0 +1,37 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: redis-physical-backup + labels: + clusterdefinition.kubeblocks.io/name: redis + {{- include "redis.labels" . | nindent 4 }} +spec: + backupType: Full + env: + - name: DATA_DIR + value: {{ .Values.dataMountPath }} + - name: DP_DB_PORT + value: "6379" + backup: + preBackup: [] + postBackup: [] + backupData: + image: {{ include "redis.image" . }} + runOnTargetPodNode: true + syncProgress: + enabled: true + intervalSeconds: 5 + command: + - bash + - -c + - | + {{- .Files.Get "dataprotection/backup.sh" | nindent 8 }} + restore: + prepareData: + image: {{ include "redis.image" . 
}} + command: + - sh + - -c + - | + {{- .Files.Get "dataprotection/restore.sh" | nindent 8 }} + postReady: [] diff --git a/deploy/redis/templates/backuppolicytemplate.yaml b/deploy/redis/templates/backuppolicytemplate.yaml index 84b1fe46435..251712f4481 100644 --- a/deploy/redis/templates/backuppolicytemplate.yaml +++ b/deploy/redis/templates/backuppolicytemplate.yaml @@ -9,26 +9,26 @@ spec: clusterDefinitionRef: redis backupPolicies: - componentDefRef: redis - retention: - ttl: 7d - schedule: - startingDeadlineMinutes: 120 - snapshot: - enable: false - cronExpression: "0 18 * * 0" - datafile: - enable: false - cronExpression: "0 18 * * 0" - snapshot: - target: - connectionCredentialKey: - passwordKey: password - usernameKey: username - datafile: - backupToolName: redis-physical-backup-tool - backupsHistoryLimit: 7 - target: - role: secondary - backupStatusUpdates: - - updateStage: post - useTargetPodServiceAccount: true \ No newline at end of file + retentionPeriod: 7d + target: + role: secondary + backupMethods: + - name: datafile + snapshotVolumes: false + actionSetName: redis-physical-backup + targetVolumes: + volumeMounts: + - name: data + mountPath: {{ .Values.dataMountPath }} + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: datafile + enabled: false + cronExpression: "0 18 * * 0" + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * 0" \ No newline at end of file diff --git a/deploy/redis/templates/backuptool.yaml b/deploy/redis/templates/backuptool.yaml deleted file mode 100644 index a6a82cfe881..00000000000 --- a/deploy/redis/templates/backuptool.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: redis-physical-backup-tool - labels: - clusterdefinition.kubeblocks.io/name: redis - {{- include "redis.labels" . | nindent 4 }} -spec: - image: {{ include "redis.image" . 
}} - deployKind: job - env: - - name: DATA_DIR - value: /data - physical: - restoreCommands: - - sh - - -c - - | - set -e - mkdir -p ${DATA_DIR} - res=`find ${DATA_DIR} -type f` - data_protection_file=${DATA_DIR}/.kb-data-protection - if [ ! -z "${res}" ] && [ ! -f ${data_protection_file} ]; then - echo "${DATA_DIR} is not empty! Please make sure that the directory is empty before restoring the backup." - exit 1 - fi - touch ${data_protection_file} && sync - tar -xvf ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz -C ${DATA_DIR} - rm -rf ${data_protection_file} && sync - logical: - restoreCommands: [] - backupCommands: - - bash - - -c - - | - set -e - connect_url="redis-cli -h ${DB_HOST} -p 6379 -a ${DB_PASSWORD}" - last_save=$(${connect_url} LASTSAVE) - echo "INFO: start BGSAVE" - ${connect_url} BGSAVE - echo "INFO: wait for saving rdb successfully" - while true; do - end_save=$(${connect_url} LASTSAVE) - if [ $end_save -ne $last_save ];then - break - fi - sleep 1 - done - echo "INFO: start to save data file..." 
- mkdir -p ${BACKUP_DIR} && cd ${DATA_DIR} && sync - tar -czvf ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz ./ - echo "INFO: save data file successfully" && sync - TOTAL_SIZE=$(du -shx ${BACKUP_DIR}|awk '{print $1}') - echo "{\"totalSize\":\"$TOTAL_SIZE\",\"manifests\":{\"backupTool\":{\"uploadTotalSize\":\"${TOTAL_SIZE}\"}}}" > ${BACKUP_DIR}/backup.info && sync diff --git a/deploy/redis/templates/clusterdefinition.yaml b/deploy/redis/templates/clusterdefinition.yaml index 766917176f8..2fe21b69ce1 100644 --- a/deploy/redis/templates/clusterdefinition.yaml +++ b/deploy/redis/templates/clusterdefinition.yaml @@ -64,7 +64,7 @@ spec: containerPort: 6379 volumeMounts: - name: data - mountPath: /data + mountPath: {{ .Values.dataMountPath }} - name: redis-config mountPath: /etc/conf - name: scripts @@ -231,7 +231,7 @@ spec: imagePullPolicy: IfNotPresent volumeMounts: - name: data - mountPath: /data + mountPath: {{ .Values.dataMountPath }} - name: redis-config mountPath: /etc/conf - name: sentinel-conf @@ -278,7 +278,7 @@ spec: name: redis-sentinel volumeMounts: - name: data - mountPath: /data + mountPath: {{ .Values.dataMountPath }} - name: redis-config mountPath: /etc/conf - name: sentinel-conf diff --git a/deploy/redis/values.yaml b/deploy/redis/values.yaml index ca41d6f8d52..636cdfe7f56 100644 --- a/deploy/redis/values.yaml +++ b/deploy/redis/values.yaml @@ -17,6 +17,7 @@ imagePullSecrets: [] nameOverride: "" fullnameOverride: "" clusterVersionOverride: "" +dataMountPath: /data logConfigs: running: /data/running.log diff --git a/deploy/weaviate/templates/backuppolicytemplate.yaml b/deploy/weaviate/templates/backuppolicytemplate.yaml index 579493cead8..ab7dc465956 100644 --- a/deploy/weaviate/templates/backuppolicytemplate.yaml +++ b/deploy/weaviate/templates/backuppolicytemplate.yaml @@ -9,14 +9,14 @@ spec: clusterDefinitionRef: weaviate backupPolicies: - componentDefRef: weaviate - retention: - ttl: 7d - schedule: - snapshot: - enable: false - cronExpression: "0 18 * * 0" - 
snapshot: - target: - connectionCredentialKey: - passwordKey: password - usernameKey: username \ No newline at end of file + retentionPeriod: 7d + backupMethods: + - name: volume-snapshot + snapshotVolumes: true + targetVolumes: + volumes: + - data + schedules: + - backupMethod: volume-snapshot + enabled: false + cronExpression: "0 18 * * 0" \ No newline at end of file diff --git a/docs/user_docs/cli/kbcli_backup_create.md b/docs/user_docs/cli/kbcli_backup_create.md index dd77401b065..5dbced23bc3 100644 --- a/docs/user_docs/cli/kbcli_backup_create.md +++ b/docs/user_docs/cli/kbcli_backup_create.md @@ -26,8 +26,8 @@ kbcli backup create NAME [flags] ``` --cluster string Cluster name -h, --help help for create + --method string Backup type (default "snapshot") --policy string Backup policy name, this flag will be ignored when backup-type is snapshot - --type string Backup type (default "snapshot") ``` ### Options inherited from parent commands diff --git a/docs/user_docs/cli/kbcli_cluster_backup.md b/docs/user_docs/cli/kbcli_cluster_backup.md index 483f8772985..28b11ab010a 100644 --- a/docs/user_docs/cli/kbcli_cluster_backup.md +++ b/docs/user_docs/cli/kbcli_cluster_backup.md @@ -30,9 +30,9 @@ kbcli cluster backup NAME [flags] ``` -h, --help help for backup + --method string Backup method that defined in backup policy --name string Backup name --policy string Backup policy name, this flag will be ignored when backup-type is snapshot - --type string Backup type (default "snapshot") ``` ### Options inherited from parent commands diff --git a/docs/user_docs/cli/kbcli_cluster_create.md b/docs/user_docs/cli/kbcli_cluster_create.md index f17a5bde74d..658d863ca05 100644 --- a/docs/user_docs/cli/kbcli_cluster_create.md +++ b/docs/user_docs/cli/kbcli_cluster_create.md @@ -112,11 +112,11 @@ kbcli cluster create [NAME] [flags] --restore-to-time string Set a time for point in time recovery --set stringArray Set the cluster resource including cpu, memory, replicas and storage, 
each set corresponds to a component.(e.g. --set cpu=1,memory=1Gi,replicas=3,storage=20Gi or --set class=general-1c1g) -f, --set-file string Use yaml file, URL, or stdin to set the cluster resource - --source-cluster string Set a source cluster for point in time recovery --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") --termination-policy string Termination policy, one of: (DoNotTerminate, Halt, Delete, WipeOut) (default "Delete") --tolerations strings Tolerations for cluster, such as "key=value:effect, key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' --topology-keys stringArray Topology keys for affinity + --volume-restore-policy string the volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel") ``` ### Options inherited from parent commands diff --git a/docs/user_docs/cli/kbcli_cluster_restore.md b/docs/user_docs/cli/kbcli_cluster_restore.md index afe98b250e8..bb69305794e 100644 --- a/docs/user_docs/cli/kbcli_cluster_restore.md +++ b/docs/user_docs/cli/kbcli_cluster_restore.md @@ -22,10 +22,10 @@ kbcli cluster restore [flags] ### Options ``` - --backup string Backup name - -h, --help help for restore - --restore-to-time string point in time recovery(PITR) - --source-cluster string source cluster name + --backup string Backup name + -h, --help help for restore + --restore-to-time string point in time recovery(PITR) + --volume-restore-policy string the volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel") ``` ### Options inherited from parent commands diff --git a/internal/cli/cluster/cluster.go b/internal/cli/cluster/cluster.go index e147c0d8ac3..f5c9c9df89c 100644 --- a/internal/cli/cluster/cluster.go +++ b/internal/cli/cluster/cluster.go @@ -40,6 +40,7 @@ import ( "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" "github.com/apecloud/kubeblocks/internal/constant" + dptypes 
"github.com/apecloud/kubeblocks/internal/dataprotection/types" ) // ConditionsError cluster displays this status on list cmd when the status of ApplyResources or ProvisioningStarted condition is "False". @@ -182,7 +183,7 @@ func (o *ObjectsGetter) Get() (*ClusterObjects, error) { // filter back-up job pod for _, pod := range objs.Pods.Items { labels := pod.GetLabels() - if labels[constant.DataProtectionLabelBackupNameKey] == "" { + if labels[dptypes.DataProtectionLabelBackupNameKey] == "" { podList = append(podList, pod) } } @@ -247,7 +248,7 @@ func (o *ObjectsGetter) Get() (*ClusterObjects, error) { } // filter backups with cluster uid for excluding same cluster name for _, v := range backups { - sourceClusterUID := v.Labels[constant.DataProtectionLabelClusterUIDKey] + sourceClusterUID := v.Labels[dptypes.DataProtectionLabelClusterUIDKey] if sourceClusterUID == "" || sourceClusterUID == string(objs.Cluster.UID) { objs.Backups = append(objs.Backups, v) } diff --git a/internal/cli/cluster/cluster_test.go b/internal/cli/cluster/cluster_test.go index 7b777f25fae..055fdb036b3 100644 --- a/internal/cli/cluster/cluster_test.go +++ b/internal/cli/cluster/cluster_test.go @@ -26,9 +26,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" - dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/cli/testing" - "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) var _ = Describe("cluster util", func() { @@ -42,7 +41,7 @@ var _ = Describe("cluster util", func() { baseObjsWithBackupPods := func() []runtime.Object { podsWithBackup := testing.FakePods(4, testing.Namespace, testing.ClusterName) labels := podsWithBackup.Items[0].GetLabels() - labels[constant.DataProtectionLabelBackupNameKey] = string(dpv1alpha1.BackupTypeLogFile) + labels[dptypes.DataProtectionLabelBackupNameKey] = testing.BackupName 
podsWithBackup.Items[0].SetLabels(labels) return []runtime.Object{ podsWithBackup, diff --git a/internal/cli/cmd/backup/create.go b/internal/cli/cmd/backup/create.go index 7f35944afd9..ab121cbe877 100644 --- a/internal/cli/cmd/backup/create.go +++ b/internal/cli/cmd/backup/create.go @@ -86,7 +86,7 @@ func newCreateCommand(f cmdutil.Factory, streams genericclioptions.IOStreams) *c }, } - cmd.Flags().StringVar(&o.BackupType, "type", "snapshot", "Backup type") + cmd.Flags().StringVar(&o.BackupMethod, "method", "snapshot", "Backup type") cmd.Flags().StringVar(&clusterName, "cluster", "", "Cluster name") cmd.Flags().StringVar(&o.BackupPolicy, "policy", "", "Backup policy name, this flag will be ignored when backup-type is snapshot") util.RegisterClusterCompletionFunc(cmd, f) diff --git a/internal/cli/cmd/backuprepo/create.go b/internal/cli/cmd/backuprepo/create.go index 57a0ae3857e..73db76b88c4 100644 --- a/internal/cli/cmd/backuprepo/create.go +++ b/internal/cli/cmd/backuprepo/create.go @@ -47,13 +47,13 @@ import ( "github.com/xeipuuv/gojsonschema" "golang.org/x/exp/slices" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/cli/printer" "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" "github.com/apecloud/kubeblocks/internal/cli/util/flags" - "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) const ( @@ -306,7 +306,7 @@ func (o *createOptions) validate(cmd *cobra.Command) error { return err } for _, item := range list.Items { - if item.GetAnnotations()[constant.DefaultBackupRepoAnnotationKey] == "true" { + if item.GetAnnotations()[dptypes.DefaultBackupRepoAnnotationKey] == "true" { name := item.GetName() return 
fmt.Errorf("there is already a default backup repo \"%s\","+ " please don't specify the --default flag,\n"+ @@ -346,12 +346,12 @@ func (o *createOptions) createCredentialSecret() (*corev1.Secret, error) { } func (o *createOptions) buildBackupRepoObject(secret *corev1.Secret) (*unstructured.Unstructured, error) { - backupRepo := &dataprotectionv1alpha1.BackupRepo{ + backupRepo := &dpv1alpha1.BackupRepo{ TypeMeta: metav1.TypeMeta{ APIVersion: fmt.Sprintf("%s/%s", types.DPAPIGroup, types.DPAPIVersion), Kind: "BackupRepo", }, - Spec: dataprotectionv1alpha1.BackupRepoSpec{ + Spec: dpv1alpha1.BackupRepoSpec{ StorageProviderRef: o.storageProvider, PVReclaimPolicy: corev1.PersistentVolumeReclaimPolicy(o.pvReclaimPolicy), VolumeCapacity: resource.MustParse(o.volumeCapacity), @@ -371,7 +371,7 @@ func (o *createOptions) buildBackupRepoObject(secret *corev1.Secret) (*unstructu } if o.isDefault { backupRepo.Annotations = map[string]string{ - constant.DefaultBackupRepoAnnotationKey: "true", + dptypes.DefaultBackupRepoAnnotationKey: "true", } } obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(backupRepo) diff --git a/internal/cli/cmd/backuprepo/describe.go b/internal/cli/cmd/backuprepo/describe.go index 87edd2764d1..dba6d0d0566 100644 --- a/internal/cli/cmd/backuprepo/describe.go +++ b/internal/cli/cmd/backuprepo/describe.go @@ -34,7 +34,7 @@ import ( cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util/templates" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/cli/printer" "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" @@ -111,7 +111,7 @@ func (o *describeBackupRepoOptions) run() error { if err != nil { return err } - backupRepo := &dataprotectionv1alpha1.BackupRepo{} + backupRepo := &dpv1alpha1.BackupRepo{} if err = 
runtime.DefaultUnstructuredConverter.FromUnstructured(backupRepoObj.Object, backupRepo); err != nil { return err } @@ -123,7 +123,7 @@ func (o *describeBackupRepoOptions) run() error { return nil } -func (o *describeBackupRepoOptions) printBackupRepo(backupRepo *dataprotectionv1alpha1.BackupRepo) error { +func (o *describeBackupRepoOptions) printBackupRepo(backupRepo *dpv1alpha1.BackupRepo) error { printer.PrintLine("Summary:") printer.PrintPairStringToLine("Name", backupRepo.Name) printer.PrintPairStringToLine("Provider", backupRepo.Spec.StorageProviderRef) @@ -153,7 +153,7 @@ func (o *describeBackupRepoOptions) printBackupRepo(backupRepo *dataprotectionv1 return nil } -func countBackupNumsAndSize(dynamic dynamic.Interface, backupRepo *dataprotectionv1alpha1.BackupRepo) (int, string, error) { +func countBackupNumsAndSize(dynamic dynamic.Interface, backupRepo *dpv1alpha1.BackupRepo) (int, string, error) { var size uint64 count := 0 @@ -166,12 +166,12 @@ func countBackupNumsAndSize(dynamic dynamic.Interface, backupRepo *dataprotectio count = len(backupList.Items) for _, obj := range backupList.Items { - backup := &dataprotectionv1alpha1.Backup{} + backup := &dpv1alpha1.Backup{} if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, backup); err != nil { return count, humanize.Bytes(size), err } // if backup doesn't complete, we don't count it's size - if backup.Status.Phase != dataprotectionv1alpha1.BackupCompleted { + if backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { continue } backupSize, err := humanize.ParseBytes(backup.Status.TotalSize) diff --git a/internal/cli/cmd/builder/template/helm_helper.go b/internal/cli/cmd/builder/template/helm_helper.go index 50d3b0f3841..5a1ba317a15 100644 --- a/internal/cli/cmd/builder/template/helm_helper.go +++ b/internal/cli/cmd/builder/template/helm_helper.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - 
dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/generics" ) @@ -120,8 +120,8 @@ func createObjectsFromYaml(yamlBytes []byte) ([]client.Object, error) { objects = append(objects, CreateTypedObjectFromYamlByte(doc, generics.ClusterVersionSignature)) case kindFromResource(appsv1alpha1.BackupPolicyTemplate{}): objects = append(objects, CreateTypedObjectFromYamlByte(doc, generics.BackupPolicyTemplateSignature)) - case kindFromResource(dataprotectionv1alpha1.BackupTool{}): - objects = append(objects, CreateTypedObjectFromYamlByte(doc, generics.BackupToolSignature)) + case kindFromResource(dpv1alpha1.ActionSet{}): + objects = append(objects, CreateTypedObjectFromYamlByte(doc, generics.ActionSetSignature)) } } return objects, nil diff --git a/internal/cli/cmd/cluster/config_util_test.go b/internal/cli/cmd/cluster/config_util_test.go index b239e906994..05c76c604f2 100644 --- a/internal/cli/cmd/cluster/config_util_test.go +++ b/internal/cli/cmd/cluster/config_util_test.go @@ -53,7 +53,7 @@ func NewFakeOperationsOptions(ns, cName string, opsType appsv1alpha1.OpsType, ob types.ClusterGVR(): types.KindCluster + "List", types.ConfigConstraintGVR(): types.KindConfigConstraint + "List", types.BackupGVR(): types.KindBackup + "List", - types.RestoreJobGVR(): types.KindRestoreJob + "List", + types.RestoreGVR(): types.KindRestore + "List", types.OpsGVR(): types.KindOps + "List", } baseOptions.Dynamic = dynamicfakeclient.NewSimpleDynamicClientWithCustomListKinds(scheme.Scheme, listMapping, objs...) 
diff --git a/internal/cli/cmd/cluster/create.go b/internal/cli/cmd/cluster/create.go index acf054aa9b2..78e1291d667 100755 --- a/internal/cli/cmd/cluster/create.go +++ b/internal/cli/cmd/cluster/create.go @@ -53,7 +53,7 @@ import ( "k8s.io/kubectl/pkg/util/templates" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/class" "github.com/apecloud/kubeblocks/internal/cli/cluster" "github.com/apecloud/kubeblocks/internal/cli/create" @@ -227,9 +227,9 @@ type CreateOptions struct { RBACEnabled bool `json:"-"` Storages []string `json:"-"` // backup name to restore in creation - Backup string `json:"backup,omitempty"` - RestoreTime string `json:"restoreTime,omitempty"` - SourceCluster string `json:"sourceCluster,omitempty"` + Backup string `json:"backup,omitempty"` + RestoreTime string `json:"restoreTime,omitempty"` + RestoreManagementPolicy string `json:"-"` // backup config BackupConfig *appsv1alpha1.ClusterBackup `json:"backupConfig,omitempty"` @@ -262,7 +262,7 @@ func NewCreateCmd(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra cmd.Flags().StringArrayVar(&o.Storages, "pvc", []string{}, "Set the cluster detail persistent volume claim, each '--pvc' corresponds to a component, and will override the simple configurations about storage by --set (e.g. 
--pvc type=mysql,name=data,mode=ReadWriteOnce,size=20Gi --pvc type=mysql,name=log,mode=ReadWriteOnce,size=1Gi)") cmd.Flags().StringVar(&o.Backup, "backup", "", "Set a source backup to restore data") cmd.Flags().StringVar(&o.RestoreTime, "restore-to-time", "", "Set a time for point in time recovery") - cmd.Flags().StringVar(&o.SourceCluster, "source-cluster", "", "Set a source cluster for point in time recovery") + cmd.Flags().StringVar(&o.RestoreManagementPolicy, "volume-restore-policy", "Parallel", "the volume claim restore policy, supported values: [Serial, Parallel]") cmd.Flags().BoolVar(&o.RBACEnabled, "rbac-enabled", false, "Specify whether rbac resources will be created by kbcli, otherwise KubeBlocks server will try to create rbac resources") cmd.PersistentFlags().BoolVar(&o.EditBeforeCreate, "edit", o.EditBeforeCreate, "Edit the API resource before creating") cmd.PersistentFlags().StringVar(&o.DryRun, "dry-run", "none", `Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. 
If with server strategy, submit the server-side request, but no data is persistent.`) @@ -308,7 +308,7 @@ func setMonitor(monitoringInterval uint8, components []map[string]interface{}) { } } -func getRestoreFromBackupAnnotation(backup *dataprotectionv1alpha1.Backup, compSpecsCount int, firstCompName string) (string, error) { +func getRestoreFromBackupAnnotation(backup *dpv1alpha1.Backup, managementPolicy string, compSpecsCount int, firstCompName string, restoreTime string) (string, error) { componentName := backup.Labels[constant.KBAppComponentLabelKey] if len(componentName) == 0 { if compSpecsCount != 1 { @@ -316,11 +316,19 @@ func getRestoreFromBackupAnnotation(backup *dataprotectionv1alpha1.Backup, compS } componentName = firstCompName } - restoreFromBackupAnnotation := fmt.Sprintf(`{"%s":"%s"}`, componentName, backup.Name) + backupNameString := fmt.Sprintf(`"%s":"%s"`, constant.BackupNameKeyForRestore, backup.Name) + backupNamespaceString := fmt.Sprintf(`"%s":"%s"`, constant.BackupNamespaceKeyForRestore, backup.Namespace) + managementPolicyString := fmt.Sprintf(`"%s":"%s"`, constant.VolumeManagementPolicyKeyForRestore, managementPolicy) + var restoreTimeString string + if restoreTime != "" { + restoreTimeString = fmt.Sprintf(`,"%s":"%s"`, constant.RestoreTimeKeyForRestore, restoreTime) + } + + restoreFromBackupAnnotation := fmt.Sprintf(`{"%s":{%s,%s,%s%s}}`, componentName, backupNameString, backupNamespaceString, managementPolicyString, restoreTimeString) return restoreFromBackupAnnotation, nil } -func getSourceClusterFromBackup(backup *dataprotectionv1alpha1.Backup) (*appsv1alpha1.Cluster, error) { +func getSourceClusterFromBackup(backup *dpv1alpha1.Backup) (*appsv1alpha1.Cluster, error) { sourceCluster := &appsv1alpha1.Cluster{} sourceClusterJSON := backup.Annotations[constant.ClusterSnapshotAnnotationKey] if err := json.Unmarshal([]byte(sourceClusterJSON), sourceCluster); err != nil { @@ -330,43 +338,21 @@ func getSourceClusterFromBackup(backup 
*dataprotectionv1alpha1.Backup) (*appsv1a return sourceCluster, nil } -func getBackupObjectFromRestoreArgs(o *CreateOptions, backup *dataprotectionv1alpha1.Backup) error { - if o.Backup != "" { - if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, o.Backup); err != nil { - return err - } - } else if o.RestoreTime != "" { - createRestoreOptions := CreateRestoreOptions{ - SourceCluster: o.SourceCluster, - RestoreTimeStr: o.RestoreTime, - } - createRestoreOptions.Dynamic = o.Dynamic - createRestoreOptions.Namespace = o.Namespace - if err := createRestoreOptions.validateRestoreTime(); err != nil { - return err - } - objs, err := o.Dynamic.Resource(types.BackupGVR()).Namespace(o.Namespace). - List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", - constant.AppInstanceLabelKey, o.SourceCluster), - }) - if err != nil { - return err - } - if len(objs.Items) == 0 { - return fmt.Errorf("can not found any backup to restore time") - } - - return runtime.DefaultUnstructuredConverter.FromUnstructured(objs.Items[0].UnstructuredContent(), backup) +func getBackupObjectFromRestoreArgs(o *CreateOptions, backup *dpv1alpha1.Backup) error { + if o.Backup == "" { + return nil + } + if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, o.Backup); err != nil { + return err } return nil } func fillClusterInfoFromBackup(o *CreateOptions, cls **appsv1alpha1.Cluster) error { - if o.Backup == "" && o.RestoreTime == "" && o.SourceCluster == "" { + if o.Backup == "" { return nil } - backup := &dataprotectionv1alpha1.Backup{} + backup := &dpv1alpha1.Backup{} if err := getBackupObjectFromRestoreArgs(o, backup); err != nil { return err } @@ -374,12 +360,6 @@ func fillClusterInfoFromBackup(o *CreateOptions, cls **appsv1alpha1.Cluster) err if err != nil { return err } - // HACK/TODO: apecloud-mysql pitr only support one replica for PITR. 
- if backupCluster.Spec.ClusterDefRef == apeCloudMysql && o.RestoreTime != "" { - for _, c := range backupCluster.Spec.ComponentSpecs { - c.Replicas = 1 - } - } curCluster := *cls if curCluster == nil { curCluster = backupCluster @@ -402,54 +382,53 @@ func fillClusterInfoFromBackup(o *CreateOptions, cls **appsv1alpha1.Cluster) err return nil } +func formatRestoreTimeAndValidate(restoreTimeStr string, continuousBackup *dpv1alpha1.Backup) (string, error) { + if restoreTimeStr == "" { + return restoreTimeStr, nil + } + restoreTime, err := util.TimeParse(restoreTimeStr, time.Second) + if err != nil { + // retry to parse time with RFC3339 format. + var errRFC error + restoreTime, errRFC = time.Parse(time.RFC3339, restoreTimeStr) + if errRFC != nil { + // if retry failure, report the error + return restoreTimeStr, err + } + } + restoreTimeStr = restoreTime.Format(time.RFC3339) + // TODO: check with Recoverable time + if !isTimeInRange(restoreTime, continuousBackup.Status.TimeRange.Start.Time, continuousBackup.Status.TimeRange.End.Time) { + return restoreTimeStr, fmt.Errorf("restore-to-time is out of time range, you can view the recoverable time: \n"+ + "\tkbcli cluster describe %s -n %s", continuousBackup.Labels[constant.AppInstanceLabelKey], continuousBackup.Namespace) + } + return restoreTimeStr, nil +} + func setBackup(o *CreateOptions, components []map[string]interface{}) error { backupName := o.Backup if len(backupName) == 0 || len(components) == 0 { return nil } - backup := &dataprotectionv1alpha1.Backup{} + backup := &dpv1alpha1.Backup{} if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, backupName); err != nil { return err } - if backup.Status.Phase != dataprotectionv1alpha1.BackupCompleted { + if backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { return fmt.Errorf(`backup "%s" is not completed`, backup.Name) } - restoreAnnotation, err := getRestoreFromBackupAnnotation(backup, len(components), 
components[0]["name"].(string)) + restoreTimeStr, err := formatRestoreTimeAndValidate(o.RestoreTime, backup) if err != nil { return err } - if o.Annotations == nil { - o.Annotations = map[string]string{} - } - o.Annotations[constant.RestoreFromBackUpAnnotationKey] = restoreAnnotation - return nil -} - -func setRestoreTime(o *CreateOptions, components []map[string]interface{}) error { - if o.RestoreTime == "" || o.SourceCluster == "" { - return nil - } - - // HACK/TODO: apecloud-mysql pitr only support one replica for PITR. - if o.ClusterDefRef == apeCloudMysql { - for _, c := range components { - if c["replicas"].(int64) > 1 { - return fmt.Errorf("apecloud-mysql only support one replica for point-in-time recovery") - } - } + restoreAnnotation, err := getRestoreFromBackupAnnotation(backup, o.RestoreManagementPolicy, len(components), components[0]["name"].(string), restoreTimeStr) + if err != nil { + return err } - if o.Annotations == nil { o.Annotations = map[string]string{} } - restoreTime, err := util.TimeParse(o.RestoreTime, time.Second) - if err != nil { - return err - } - // TODO: hack implement for multi-component cluster, how to elegantly implement pitr for multi-component cluster? 
- o.Annotations[constant.RestoreFromTimeAnnotationKey] = fmt.Sprintf(`{"%s":"%s"}`, components[0]["name"], restoreTime.Format(time.RFC3339)) - o.Annotations[constant.RestoreFromSrcClusterAnnotationKey] = o.SourceCluster - + o.Annotations[constant.RestoreFromBackupAnnotationKey] = restoreAnnotation return nil } @@ -548,9 +527,6 @@ func (o *CreateOptions) Complete() error { if err = setBackup(o, components); err != nil { return err } - if err = setRestoreTime(o, components); err != nil { - return err - } o.ComponentSpecs = components // TolerationsRaw looks like `["key=engineType,value=mongo,operator=Equal,effect=NoSchedule"]` after parsing by cmd @@ -1404,9 +1380,7 @@ func (o *CreateOptions) buildAnnotation(cls *appsv1alpha1.Cluster) { func (o *CreateOptions) buildBackupConfig(cls *appsv1alpha1.Cluster) error { // set default backup config - o.BackupConfig = &appsv1alpha1.ClusterBackup{ - Method: dataprotectionv1alpha1.BackupMethodSnapshot, - } + o.BackupConfig = &appsv1alpha1.ClusterBackup{} // if the cls.Backup isn't nil, use the backup config in cluster if cls != nil && cls.Spec.Backup != nil { @@ -1427,9 +1401,9 @@ func (o *CreateOptions) buildBackupConfig(cls *appsv1alpha1.Cluster) error { case "backup-enabled": o.BackupConfig.Enabled = &o.BackupEnabled case "backup-retention-period": - o.BackupConfig.RetentionPeriod = &o.BackupRetentionPeriod + o.BackupConfig.RetentionPeriod = dpv1alpha1.RetentionPeriod(o.BackupRetentionPeriod) case "backup-method": - o.BackupConfig.Method = dataprotectionv1alpha1.BackupMethod(o.BackupMethod) + o.BackupConfig.Method = o.BackupMethod case "backup-cron-expression": if _, err := cron.ParseStandard(o.BackupCronExpression); err != nil { return fmt.Errorf("invalid cron expression: %s, please see https://en.wikipedia.org/wiki/Cron", o.BackupCronExpression) diff --git a/internal/cli/cmd/cluster/create_test.go b/internal/cli/cmd/cluster/create_test.go index 8edea4d46d9..4f4854e6445 100644 --- a/internal/cli/cmd/cluster/create_test.go 
+++ b/internal/cli/cmd/cluster/create_test.go @@ -425,20 +425,6 @@ var _ = Describe("create", func() { Expect(setBackup(o, components)).Should(Succeed()) }) - It("set restoreTime", func() { - o := &CreateOptions{} - o.Namespace = testing.Namespace - o.RestoreTime = "Jun 16,2023 18:57:01 UTC+0800" - o.SourceCluster = testing.ClusterName - components := []map[string]interface{}{ - { - "name": testing.ClusterName, - }, - } - By("test setRestoreTime") - Expect(setRestoreTime(o, components)).Should(Succeed()) - }) - It("test fillClusterMetadataFromBackup", func() { baseBackupName := "test-backup" logBackupName := "test-logfile-backup" @@ -452,32 +438,30 @@ var _ = Describe("create", func() { o.Dynamic = dynamic o.Namespace = testing.Namespace o.RestoreTime = "Jun 16,2023 18:57:01 UTC+0800" + o.Backup = logBackupName backupLogTime, _ := util.TimeParse(o.RestoreTime, time.Second) - o.SourceCluster = clusterName buildBackupLogTime := func(d time.Duration) string { return backupLogTime.Add(d).Format(time.RFC3339) } - buildManifests := func(startTime, stopTime string) map[string]any { + buildTimeRange := func(startTime, stopTime string) map[string]any { return map[string]any{ - "backupLog": map[string]any{ - "startTime": startTime, - "stopTime": stopTime, - }, + "start": startTime, + "end": stopTime, } } - mockBackupInfo(dynamic, baseBackupName, clusterName, buildManifests(buildBackupLogTime(-30*time.Second), buildBackupLogTime(-10*time.Second)), "snapshot") - mockBackupInfo(dynamic, logBackupName, clusterName, buildManifests(buildBackupLogTime(-1*time.Minute), buildBackupLogTime(time.Minute)), "logfile") + mockBackupInfo(dynamic, baseBackupName, clusterName, buildTimeRange(buildBackupLogTime(-30*time.Second), buildBackupLogTime(-10*time.Second)), "snapshot") + mockBackupInfo(dynamic, logBackupName, clusterName, buildTimeRange(buildBackupLogTime(-1*time.Minute), buildBackupLogTime(time.Minute)), "logfile") By("fill cluster from backup success") 
Expect(fillClusterInfoFromBackup(o, &cluster)).Should(Succeed()) Expect(cluster.Spec.ClusterDefRef).Should(Equal(testing.ClusterDefName)) Expect(cluster.Spec.ClusterVersionRef).Should(Equal(testing.ClusterVersionName)) - By("fill cluster definition does not matched") + By("fill cluster definition does not match") o.ClusterDefRef = "test-not-match-cluster-definition" Expect(fillClusterInfoFromBackup(o, &cluster)).Should(HaveOccurred()) o.ClusterDefRef = "" - By("fill cluster version does not matched") + By("fill cluster version does not match") o.ClusterVersionRef = "test-not-match-cluster-version" Expect(fillClusterInfoFromBackup(o, &cluster)).Should(HaveOccurred()) }) @@ -492,9 +476,9 @@ var _ = Describe("create", func() { By("test backup is with snapshot method") o.BackupMethod = "snapshot" - Expect(o.Cmd.Flags().Set("backup", "snapshot")).To(Succeed()) + Expect(o.Cmd.Flags().Set("backup-method", "snapshot")).To(Succeed()) Expect(o.buildBackupConfig(cluster)).To(Succeed()) - Expect(string(o.BackupConfig.Method)).Should(Equal("snapshot")) + Expect(o.BackupConfig.Method).Should(Equal("snapshot")) By("test backup is with wrong cron expression") o.BackupCronExpression = "wrong-cron-expression" diff --git a/internal/cli/cmd/cluster/dataprotection.go b/internal/cli/cmd/cluster/dataprotection.go index 9bd60008ca1..2936247de0e 100644 --- a/internal/cli/cmd/cluster/dataprotection.go +++ b/internal/cli/cmd/cluster/dataprotection.go @@ -25,11 +25,9 @@ import ( "fmt" "reflect" "sort" - "strconv" "strings" "time" - "github.com/dapr/kit/cron" "github.com/pkg/errors" "github.com/spf13/cobra" batchv1 "k8s.io/api/batch/v1" @@ -61,6 +59,7 @@ import ( "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) var ( @@ -121,7 +120,7 @@ var ( const annotationTrueValue = "true" type CreateBackupOptions struct { - 
BackupType string `json:"backupType"` + BackupMethod string `json:"backupMethod"` BackupName string `json:"backupName"` Role string `json:"role,omitempty"` BackupPolicy string `json:"backupPolicy"` @@ -173,8 +172,10 @@ func (o *CreateBackupOptions) Validate() error { return err } } - if o.BackupType == string(dpv1alpha1.BackupTypeLogFile) { - return fmt.Errorf(`can not create logfile backup, you can create it by enabling spec.schedule.logfile in BackupPolicy "%s"`, o.BackupPolicy) + if o.BackupMethod == "" { + // TODO(ldm): if backup policy only has one backup method, use it as default + // backup method. + return fmt.Errorf("missing backup method") } // TODO: check if pvc exists return nil @@ -212,7 +213,7 @@ func (o *CreateBackupOptions) getDefaultBackupPolicy() (string, error) { } var defaultBackupPolicies []unstructured.Unstructured for _, obj := range objs.Items { - if obj.GetAnnotations()[constant.DefaultBackupPolicyAnnotationKey] == annotationTrueValue { + if obj.GetAnnotations()[dptypes.DefaultBackupPolicyAnnotationKey] == annotationTrueValue { defaultBackupPolicies = append(defaultBackupPolicies, obj) } } @@ -258,7 +259,7 @@ func NewCreateBackupCmd(f cmdutil.Factory, streams genericclioptions.IOStreams) }, } - cmd.Flags().StringVar(&o.BackupType, "type", "snapshot", "Backup type") + cmd.Flags().StringVar(&o.BackupMethod, "method", "", "Backup method that defined in backup policy") cmd.Flags().StringVar(&o.BackupName, "name", "", "Backup name") cmd.Flags().StringVar(&o.BackupPolicy, "policy", "", "Backup policy name, this flag will be ignored when backup-type is snapshot") @@ -302,28 +303,23 @@ func PrintBackupList(o ListBackupOptions) error { // sort the unstructured objects with the creationTimestamp in positive order sort.Sort(unstructuredList(backupList.Items)) tbl := printer.NewTablePrinter(o.Out) - tbl.SetHeader("NAME", "NAMESPACE", "SOURCE-CLUSTER", "TYPE", "STATUS", "TOTAL-SIZE", "DURATION", "CREATE-TIME", "COMPLETION-TIME", "EXPIRATION") + 
tbl.SetHeader("NAME", "NAMESPACE", "SOURCE-CLUSTER", "METHOD", "STATUS", "TOTAL-SIZE", "DURATION", "CREATE-TIME", "COMPLETION-TIME", "EXPIRATION") for _, obj := range backupList.Items { backup := &dpv1alpha1.Backup{} if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, backup); err != nil { return err } - sourceCluster := backup.Status.SourceCluster - if sourceCluster == "" { - sourceCluster = backup.Labels[constant.AppInstanceLabelKey] - } + // TODO(ldm): find cluster from backup policy target spec. + sourceCluster := backup.Labels[constant.AppInstanceLabelKey] durationStr := "" if backup.Status.Duration != nil { durationStr = duration.HumanDuration(backup.Status.Duration.Duration) } statusString := string(backup.Status.Phase) - if backup.Status.Phase == dpv1alpha1.BackupRunning && backup.Status.AvailableReplicas != nil { - statusString = fmt.Sprintf("%s(AvailablePods: %d)", statusString, *backup.Status.AvailableReplicas) - } if len(o.Names) > 0 && !backupNameMap[backup.Name] { continue } - tbl.AddRow(backup.Name, backup.Namespace, sourceCluster, backup.Spec.BackupType, statusString, backup.Status.TotalSize, + tbl.AddRow(backup.Name, backup.Namespace, sourceCluster, backup.Spec.BackupMethod, statusString, backup.Status.TotalSize, durationStr, util.TimeFormat(&backup.CreationTimestamp), util.TimeFormat(backup.Status.CompletionTimestamp), util.TimeFormat(backup.Status.Expiration)) } @@ -419,9 +415,9 @@ type CreateRestoreOptions struct { Backup string `json:"backup,omitempty"` // point in time recovery args - RestoreTime *time.Time `json:"restoreTime,omitempty"` - RestoreTimeStr string `json:"restoreTimeStr,omitempty"` - SourceCluster string `json:"sourceCluster,omitempty"` + RestoreTime *time.Time `json:"restoreTime,omitempty"` + RestoreTimeStr string `json:"restoreTimeStr,omitempty"` + RestoreManagementPolicy string `json:"volumeRestorePolicy,omitempty"` create.CreateOptions `json:"-"` } @@ -443,8 +439,6 @@ func (o *CreateRestoreOptions) 
getClusterObject(backup *dpv1alpha1.Backup) (*app func (o *CreateRestoreOptions) Run() error { if o.Backup != "" { return o.runRestoreFromBackup() - } else if o.RestoreTime != nil { - return o.runPITR() } return nil } @@ -455,25 +449,30 @@ func (o *CreateRestoreOptions) runRestoreFromBackup() error { if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, o.Backup); err != nil { return err } - if backup.Status.Phase != dpv1alpha1.BackupCompleted { + if backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { return errors.Errorf(`backup "%s" is not completed.`, backup.Name) } if len(backup.Labels[constant.AppInstanceLabelKey]) == 0 { return errors.Errorf(`missing source cluster in backup "%s", "app.kubernetes.io/instance" is empty in labels.`, o.Backup) } + + restoreTimeStr, err := formatRestoreTimeAndValidate(o.RestoreTimeStr, backup) + if err != nil { + return err + } // get the cluster object and set the annotation for restore clusterObj, err := o.getClusterObject(backup) if err != nil { return err } - restoreAnnotation, err := getRestoreFromBackupAnnotation(backup, len(clusterObj.Spec.ComponentSpecs), clusterObj.Spec.ComponentSpecs[0].Name) + restoreAnnotation, err := getRestoreFromBackupAnnotation(backup, o.RestoreManagementPolicy, len(clusterObj.Spec.ComponentSpecs), clusterObj.Spec.ComponentSpecs[0].Name, restoreTimeStr) if err != nil { return err } clusterObj.ObjectMeta = metav1.ObjectMeta{ Namespace: clusterObj.Namespace, Name: o.Name, - Annotations: map[string]string{constant.RestoreFromBackUpAnnotationKey: restoreAnnotation}, + Annotations: map[string]string{constant.RestoreFromBackupAnnotationKey: restoreAnnotation}, } return o.createCluster(clusterObj) } @@ -500,103 +499,13 @@ func (o *CreateRestoreOptions) createCluster(cluster *appsv1alpha1.Cluster) erro return nil } -func (o *CreateRestoreOptions) runPITR() error { - objs, err := o.Dynamic.Resource(types.BackupGVR()).Namespace(o.Namespace). 
- List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", - constant.AppInstanceLabelKey, o.SourceCluster), - }) - if err != nil { - return err - } - backup := &dpv1alpha1.Backup{} - - // no need to check items len because it is validated by o.validateRestoreTime(). - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(objs.Items[0].Object, backup); err != nil { - return err - } - compName := backup.Labels[constant.KBAppComponentLabelKey] - if compName == "" { - return fmt.Errorf(`component name label %s is missing in backup "%s"`, constant.KBAppComponentLabelKey, backup.Name) - } - // TODO: use opsRequest to create cluster. - // get the cluster object and set the annotation for restore - clusterObj, err := o.getClusterObject(backup) - if err != nil { - return err - } - // TODO: hack implement for multi-component cluster, how to elegantly implement pitr for multi-component cluster? - clusterObj.ObjectMeta = metav1.ObjectMeta{ - Namespace: clusterObj.Namespace, - Name: o.Name, - Annotations: map[string]string{ - constant.RestoreFromTimeAnnotationKey: fmt.Sprintf(`{"%s":"%s"}`, compName, o.RestoreTime.Format(time.RFC3339)), - constant.RestoreFromSrcClusterAnnotationKey: o.SourceCluster, - }, - } - return o.createCluster(clusterObj) -} - func isTimeInRange(t time.Time, start time.Time, end time.Time) bool { return !t.Before(start) && !t.After(end) } -func (o *CreateRestoreOptions) validateRestoreTime() error { - if o.RestoreTimeStr == "" && o.SourceCluster == "" { - return nil - } - if o.RestoreTimeStr != "" && o.SourceCluster == "" { - return fmt.Errorf("--source-cluster must be specified if specified --restore-to-time") - } - restoreTime, err := util.TimeParse(o.RestoreTimeStr, time.Second) - if err != nil { - // retry to parse time with RFC3339 format. 
- var errRFC error - restoreTime, errRFC = time.Parse(time.RFC3339, o.RestoreTimeStr) - if errRFC != nil { - // if retry failure, report the error - return err - } - } - o.RestoreTime = &restoreTime - objs, err := o.Dynamic.Resource(types.BackupGVR()).Namespace(o.Namespace). - List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", - constant.AppInstanceLabelKey, o.SourceCluster), - }) - if err != nil { - return err - } - backupMap := map[string][]dpv1alpha1.Backup{} - for _, i := range objs.Items { - obj := dpv1alpha1.Backup{} - if err = runtime.DefaultUnstructuredConverter.FromUnstructured(i.Object, &obj); err != nil { - return err - } - uid := obj.Labels[constant.DataProtectionLabelClusterUIDKey] - if backupMap[uid] == nil { - backupMap[uid] = make([]dpv1alpha1.Backup, 0) - } - backupMap[uid] = append(backupMap[uid], obj) - } - for _, v := range backupMap { - recoverableTime := dpv1alpha1.GetRecoverableTimeRange(v) - for _, i := range recoverableTime { - if isTimeInRange(restoreTime, i.StartTime.Time, i.StopTime.Time) { - return nil - } - } - } - return fmt.Errorf("restore-to-time is out of time range, you can view the recoverable time: \n"+ - "\tkbcli cluster describe %s -n %s", o.SourceCluster, o.Namespace) -} - func (o *CreateRestoreOptions) Validate() error { - if o.Backup == "" && o.RestoreTimeStr == "" { - return fmt.Errorf("must be specified one of the --backup or --restore-to-time") - } - if err := o.validateRestoreTime(); err != nil { - return err + if o.Backup == "" { + return fmt.Errorf("the --backup flag must be specified") } if o.Name == "" { @@ -634,7 +543,7 @@ func NewCreateRestoreCmd(f cmdutil.Factory, streams genericclioptions.IOStreams) } cmd.Flags().StringVar(&o.Backup, "backup", "", "Backup name") cmd.Flags().StringVar(&o.RestoreTimeStr, "restore-to-time", "", "point in time recovery(PITR)") - cmd.Flags().StringVar(&o.SourceCluster, "source-cluster", "", "source cluster name") + 
cmd.Flags().StringVar(&o.RestoreManagementPolicy, "volume-restore-policy", "Parallel", "the volume claim restore policy, supported values: [Serial, Parallel]") return cmd } @@ -688,7 +597,7 @@ func printBackupPolicyList(o list.ListOptions) error { tbl := printer.NewTablePrinter(o.Out) tbl.SetHeader("NAME", "NAMESPACE", "DEFAULT", "CLUSTER", "CREATE-TIME", "STATUS") for _, obj := range backupPolicyList.Items { - defaultPolicy, ok := obj.GetAnnotations()[constant.DefaultBackupPolicyAnnotationKey] + defaultPolicy, ok := obj.GetAnnotations()[dptypes.DefaultBackupPolicyAnnotationKey] backupPolicy := &dpv1alpha1.BackupPolicy{} if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, backupPolicy); err != nil { return err @@ -766,28 +675,7 @@ func (o *editBackupPolicyOptions) complete(args []string) error { if o.dynamic, err = o.Factory.DynamicClient(); err != nil { return err } - updateSchedulePolicyEnable := func(schedulePolicy *dpv1alpha1.SchedulePolicy, targetVal string) error { - if schedulePolicy != nil { - enable, err := strconv.ParseBool(targetVal) - if err != nil { - return err - } - schedulePolicy.Enable = enable - } - return nil - } - updateSchedulePolicyCronExpression := func(schedulePolicy *dpv1alpha1.SchedulePolicy, targetVal string) error { - if targetVal != "" { - if _, err = cron.ParseStandard(targetVal); err != nil { - return err - } - } - if schedulePolicy != nil { - schedulePolicy.CronExpression = targetVal - } - return nil - } - updateRepoName := func(commonPolicy *dpv1alpha1.CommonBackupPolicy, targetVal string) error { + updateRepoName := func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { // check if the backup repo exists if targetVal != "" { _, err := o.dynamic.Resource(types.BackupRepoGVR()).Get(context.Background(), targetVal, metav1.GetOptions{}) @@ -795,117 +683,22 @@ func (o *editBackupPolicyOptions) complete(args []string) error { return err } } - if commonPolicy != nil { + if backupPolicy != nil { if 
targetVal != "" { - commonPolicy.BackupRepoName = &targetVal + backupPolicy.Spec.BackupRepoName = &targetVal } else { - commonPolicy.BackupRepoName = nil + backupPolicy.Spec.BackupRepoName = nil } } return nil } - updatePVCName := func(commonPolicy *dpv1alpha1.CommonBackupPolicy, targetVal string) error { - if commonPolicy != nil { - commonPolicy.PersistentVolumeClaim.Name = &targetVal - } - return nil - } - updatePVCStorageClass := func(commonPolicy *dpv1alpha1.CommonBackupPolicy, targetVal string) error { - if commonPolicy != nil { - commonPolicy.PersistentVolumeClaim.StorageClassName = &targetVal - } - return nil - } + o.editContent = []editorRow{ { - key: "retention.ttl", - jsonpath: "retention.ttl", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - backupPolicy.Spec.Retention.TTL = &targetVal - return nil - }, - }, - { - key: "schedule.datafile.enable", - jsonpath: "schedule.datafile.enable", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyEnable(backupPolicy.Spec.Schedule.Datafile, targetVal) - }, - }, - { - key: "schedule.datafile.cronExpression", - jsonpath: "schedule.datafile.cronExpression", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyCronExpression(backupPolicy.Spec.Schedule.Datafile, targetVal) - }, - }, - { - key: "schedule.snapshot.enable", - jsonpath: "schedule.snapshot.enable", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyEnable(backupPolicy.Spec.Schedule.Snapshot, targetVal) - }, - }, - { - key: "schedule.snapshot.cronExpression", - jsonpath: "schedule.snapshot.cronExpression", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyCronExpression(backupPolicy.Spec.Schedule.Snapshot, targetVal) - }, - }, - { - key: "schedule.logfile.enable", - jsonpath: 
"schedule.logfile.enable", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyEnable(backupPolicy.Spec.Schedule.Logfile, targetVal) - }, - }, - { - key: "schedule.logfile.cronExpression", - jsonpath: "schedule.logfile.cronExpression", updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateSchedulePolicyCronExpression(backupPolicy.Spec.Schedule.Logfile, targetVal) - }, - }, - { - key: "datafile.pvc.name", - jsonpath: "datafile.persistentVolumeClaim.name", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updatePVCName(backupPolicy.Spec.Datafile, targetVal) - }, - }, - { - key: "datafile.pvc.storageClassName", - jsonpath: "datafile.persistentVolumeClaim.storageClassName", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updatePVCStorageClass(backupPolicy.Spec.Datafile, targetVal) - }, - }, - { - key: "datafile.backupRepoName", - jsonpath: "datafile.backupRepoName", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateRepoName(backupPolicy.Spec.Datafile, targetVal) - }, - }, - { - key: "logfile.pvc.name", - jsonpath: "logfile.persistentVolumeClaim.name", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updatePVCName(backupPolicy.Spec.Logfile, targetVal) - }, - }, - { - key: "logfile.pvc.storageClassName", - jsonpath: "logfile.persistentVolumeClaim.storageClassName", - updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updatePVCStorageClass(backupPolicy.Spec.Logfile, targetVal) - }, - }, - { - key: "logfile.backupRepoName", - jsonpath: "logfile.backupRepoName", + key: "backupRepoName", + jsonpath: "backupRepoName", updateFunc: func(backupPolicy *dpv1alpha1.BackupPolicy, targetVal string) error { - return updateRepoName(backupPolicy.Spec.Logfile, 
targetVal) + return updateRepoName(backupPolicy, targetVal) }, }, } @@ -1097,23 +890,23 @@ func (o *DescribeBackupOptions) Run() error { } func (o *DescribeBackupOptions) printBackupObj(obj *dpv1alpha1.Backup) error { + targetCluster := obj.Labels[constant.AppInstanceLabelKey] printer.PrintLineWithTabSeparator( printer.NewPair("Name", obj.Name), - printer.NewPair("Cluster", obj.Status.SourceCluster), + printer.NewPair("Cluster", targetCluster), printer.NewPair("Namespace", obj.Namespace), ) printer.PrintLine("\nSpec:") - realPrintPairStringToLine("Type", string(obj.Spec.BackupType)) + realPrintPairStringToLine("Method", obj.Spec.BackupMethod) realPrintPairStringToLine("Policy Name", obj.Spec.BackupPolicyName) printer.PrintLine("\nStatus:") realPrintPairStringToLine("Phase", string(obj.Status.Phase)) realPrintPairStringToLine("Total Size", obj.Status.TotalSize) - realPrintPairStringToLine("Backup Tool", obj.Status.BackupToolName) - realPrintPairStringToLine("PVC Name", obj.Status.PersistentVolumeClaimName) - if obj.Status.AvailableReplicas != nil { - realPrintPairStringToLine("Available Replicas", string(*obj.Status.AvailableReplicas)) + if obj.Status.BackupMethod != nil { + realPrintPairStringToLine("ActionSet Name", obj.Status.BackupMethod.ActionSetName) } + realPrintPairStringToLine("PVC Name", obj.Status.PersistentVolumeClaimName) if obj.Status.Duration != nil { realPrintPairStringToLine("Duration", duration.HumanDuration(obj.Status.Duration.Duration)) } @@ -1123,26 +916,20 @@ func (o *DescribeBackupOptions) printBackupObj(obj *dpv1alpha1.Backup) error { // print failure reason, ignore error _ = o.enhancePrintFailureReason(obj.Name, obj.Status.FailureReason) - if obj.Status.Manifests != nil { - printer.PrintLine("\nManifests:") - realPrintPairStringToLine("Target", obj.Status.Manifests.Target) - if obj.Status.Manifests.BackupLog != nil { - realPrintPairStringToLine("Log Start Time", util.TimeFormat(obj.Status.Manifests.BackupLog.StartTime)) - 
realPrintPairStringToLine("Log Stop Time", util.TimeFormat(obj.Status.Manifests.BackupLog.StopTime)) - } - if obj.Status.Manifests.BackupTool != nil { - realPrintPairStringToLine("File Path", obj.Status.Manifests.BackupTool.FilePath) - realPrintPairStringToLine("Volume Name", obj.Status.Manifests.BackupTool.VolumeName) - realPrintPairStringToLine("Upload Total Size", obj.Status.Manifests.BackupTool.UploadTotalSize) - realPrintPairStringToLine("Checksum", obj.Status.Manifests.BackupTool.Checksum) - realPrintPairStringToLine("Checkpoint", obj.Status.Manifests.BackupTool.Checkpoint) - } - if obj.Status.Manifests.Snapshot != nil { - realPrintPairStringToLine("Snapshot Name", obj.Status.Manifests.Snapshot.VolumeSnapshotName) - realPrintPairStringToLine("Snapshot Content Name", obj.Status.Manifests.Snapshot.VolumeSnapshotContentName) - } - for k, v := range obj.Status.Manifests.UserContext { - realPrintPairStringToLine(k, v) + realPrintPairStringToLine("Path", obj.Status.Path) + + if obj.Status.TimeRange != nil { + realPrintPairStringToLine("Time Range Start", util.TimeFormat(obj.Status.TimeRange.Start)) + realPrintPairStringToLine("Time Range End", util.TimeFormat(obj.Status.TimeRange.End)) + } + + if len(obj.Status.VolumeSnapshots) > 0 { + printer.PrintLine("\nVolume Snapshots:") + for _, v := range obj.Status.VolumeSnapshots { + realPrintPairStringToLine("Name", v.Name) + realPrintPairStringToLine("Content Name", v.ContentName) + realPrintPairStringToLine("Volume Name:", v.VolumeName) + realPrintPairStringToLine("Size", v.Size) } } @@ -1173,7 +960,7 @@ func (o *DescribeBackupOptions) enhancePrintFailureReason(backupName, failureRea ctx := context.Background() // get the latest job log details. 
labels := fmt.Sprintf("%s=%s", - constant.DataProtectionLabelBackupNameKey, backupName, + dptypes.DataProtectionLabelBackupNameKey, backupName, ) jobList, err := o.client.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labels}) if err != nil { diff --git a/internal/cli/cmd/cluster/dataprotection_test.go b/internal/cli/cmd/cluster/dataprotection_test.go index 98482a8d75a..0d808d0d3b1 100644 --- a/internal/cli/cmd/cluster/dataprotection_test.go +++ b/internal/cli/cmd/cluster/dataprotection_test.go @@ -35,8 +35,6 @@ import ( k8sapitypes "k8s.io/apimachinery/pkg/types" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/kubernetes/scheme" clientfake "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" @@ -50,7 +48,6 @@ import ( "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" "github.com/apecloud/kubeblocks/internal/constant" - testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) var _ = Describe("DataProtection", func() { @@ -121,29 +118,13 @@ var _ = Describe("DataProtection", func() { By("test edit backup policy function") o := editBackupPolicyOptions{Factory: tf, IOStreams: streams, GVR: types.BackupPolicyGVR()} Expect(o.complete([]string{policyName})).Should(Succeed()) - o.values = []string{"schedule.datafile.enable=false", `schedule.datafile.cronExpression="0 17 * * *"`, - "schedule.logfile.enable=false", `schedule.logfile.cronExpression="* */1 * * *"`, - "schedule.snapshot.enable=false", `schedule.snapshot.cronExpression="0 17 * * *"`, - "logfile.pvc.name=test1", "logfile.pvc.storageClassName=t1", "logfile.backupRepoName=repo", - "datafile.pvc.name=test1", "datafile.pvc.storageClassName=t1", "datafile.backupRepoName=repo"} - Expect(o.runEditBackupPolicy()).Should(Succeed()) - - By("test unset backup repo") - o.values = []string{"datafile.backupRepoName="} + o.values = 
[]string{"backupRepoName=repo"} Expect(o.runEditBackupPolicy()).Should(Succeed()) By("test backup repo not exists") - o.values = []string{"datafile.backupRepoName=repo1"} + o.values = []string{"backupRepoName=repo1"} Expect(o.runEditBackupPolicy()).Should(MatchError(ContainSubstring(`"repo1" not found`))) - By("test invalid key") - o.values = []string{"schedule.datafile.enable1=false"} - Expect(o.runEditBackupPolicy().Error()).Should(ContainSubstring("invalid key: schedule.datafile.enable1")) - - By("test invalid value") - o.values = []string{"schedule.datafile.enable=false="} - Expect(o.runEditBackupPolicy().Error()).Should(ContainSubstring("invalid row")) - By("test with vim editor") o.values = []string{} o.isTest = true @@ -177,6 +158,7 @@ var _ = Describe("DataProtection", func() { By("test with one default backupPolicy") initClient(defaultBackupPolicy) o.Dynamic = tf.FakeDynamicClient + o.BackupMethod = testing.BackupMethodName Expect(o.Validate()).Should(Succeed()) }) @@ -187,7 +169,8 @@ var _ = Describe("DataProtection", func() { cmd := NewCreateBackupCmd(tf, streams) Expect(cmd).ShouldNot(BeNil()) // must succeed otherwise exit 1 and make test fails - _ = cmd.Flags().Set("backup-policy", defaultBackupPolicy.Name) + _ = cmd.Flags().Set("policy", defaultBackupPolicy.Name) + _ = cmd.Flags().Set("method", testing.BackupMethodName) cmd.Run(cmd, []string{testing.ClusterName}) By("test with logfile type") @@ -200,12 +183,11 @@ var _ = Describe("DataProtection", func() { Name: testing.ClusterName, }, BackupPolicy: defaultBackupPolicy.Name, - BackupType: string(dpv1alpha1.BackupTypeLogFile), + BackupMethod: testing.BackupMethodName, } Expect(o.CompleteBackup()).Should(Succeed()) err := o.Validate() - Expect(err).Should(HaveOccurred()) - Expect(err.Error()).Should(ContainSubstring("can not create logfile backup, you can create it by enabling spec.schedule.logfile in BackupPolicy")) + Expect(err).Should(Succeed()) }) }) @@ -248,16 +230,13 @@ var _ = 
Describe("DataProtection", func() { backup1.Labels = map[string]string{ constant.AppInstanceLabelKey: "apecloud-mysql", } - AvailableReplicas := int32(1) - backup1.Status.Phase = dpv1alpha1.BackupRunning - backup1.Status.AvailableReplicas = &AvailableReplicas + backup1.Status.Phase = dpv1alpha1.BackupPhaseRunning backup2 := testing.FakeBackup("test1") backup2.Namespace = "backup" tf.FakeDynamicClient = testing.FakeDynamicClient(backup1, backup2) Expect(PrintBackupList(o)).Should(Succeed()) Expect(o.Out.(*bytes.Buffer).String()).Should(ContainSubstring("test1")) Expect(o.Out.(*bytes.Buffer).String()).Should(ContainSubstring("apecloud-mysql")) - Expect(o.Out.(*bytes.Buffer).String()).Should(ContainSubstring("(AvailablePods: 1)")) By("test list all namespace") o.Out.(*bytes.Buffer).Reset() @@ -281,15 +260,13 @@ var _ = Describe("DataProtection", func() { backupPolicy := testing.FakeBackupPolicy("backPolicy", clusterObj.Name) pods := testing.FakePods(1, testing.Namespace, clusterName) - tf.FakeDynamicClient = fake.NewSimpleDynamicClient( - scheme.Scheme, &secrets.Items[0], &pods.Items[0], clusterObj, backupPolicy) - tf.FakeDynamicClient = fake.NewSimpleDynamicClient( - scheme.Scheme, &secrets.Items[0], &pods.Items[0], clusterDef, clusterObj, backupPolicy) + tf.FakeDynamicClient = testing.FakeDynamicClient(&secrets.Items[0], + &pods.Items[0], clusterDef, clusterObj, backupPolicy) tf.Client = &clientfake.RESTClient{} // create backup cmd := NewCreateBackupCmd(tf, streams) Expect(cmd).ShouldNot(BeNil()) - _ = cmd.Flags().Set("type", "snapshot") + _ = cmd.Flags().Set("method", testing.BackupMethodName) _ = cmd.Flags().Set("name", backupName) cmd.Run(nil, []string{clusterName}) @@ -315,77 +292,77 @@ var _ = Describe("DataProtection", func() { cmdRestore.Run(nil, []string{newClusterName + "-with-nil-affinity"}) }) - It("restore-to-time", func() { - timestamp := time.Now().Format("20060102150405") - backupName := "backup-test-" + timestamp - backupName1 := backupName + "1" - 
clusterName := "source-cluster-" + timestamp - secrets := testing.FakeSecrets(testing.Namespace, clusterName) - clusterDef := testing.FakeClusterDef() - cluster := testing.FakeCluster(clusterName, testing.Namespace) - clusterDefLabel := map[string]string{ - constant.ClusterDefLabelKey: clusterDef.Name, - } - cluster.SetLabels(clusterDefLabel) - backupPolicy := testing.FakeBackupPolicy("backPolicy", cluster.Name) - backupTypeMeta := testing.FakeBackup("backup-none").TypeMeta - backupLabels := map[string]string{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: "test", - constant.DataProtectionLabelClusterUIDKey: string(cluster.UID), - } - now := metav1.Now() - baseBackup := testapps.NewBackupFactory(testing.Namespace, "backup-base"). - SetBackupType(dpv1alpha1.BackupTypeSnapshot). - SetBackLog(now.Add(-time.Minute), now.Add(-time.Second)). - SetLabels(backupLabels).GetObject() - baseBackup.TypeMeta = backupTypeMeta - baseBackup.Status.Phase = dpv1alpha1.BackupCompleted - logfileBackup := testapps.NewBackupFactory(testing.Namespace, backupName). - SetBackupType(dpv1alpha1.BackupTypeLogFile). - SetBackLog(now.Add(-time.Minute), now.Add(time.Minute)). - SetLabels(backupLabels).GetObject() - logfileBackup.TypeMeta = backupTypeMeta - - logfileBackup1 := testapps.NewBackupFactory(testing.Namespace, backupName1). - SetBackupType(dpv1alpha1.BackupTypeLogFile). 
- SetBackLog(now.Add(-time.Minute), now.Add(2*time.Minute)).GetObject() - uid := string(cluster.UID) - logfileBackup1.Labels = map[string]string{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: "test", - constant.DataProtectionLabelClusterUIDKey: uid[:30] + "00", - } - logfileBackup1.TypeMeta = backupTypeMeta - - pods := testing.FakePods(1, testing.Namespace, clusterName) - tf.FakeDynamicClient = fake.NewSimpleDynamicClient( - scheme.Scheme, &secrets.Items[0], &pods.Items[0], cluster, backupPolicy, baseBackup, logfileBackup, logfileBackup1) - tf.Client = &clientfake.RESTClient{} - - By("restore new cluster from source cluster which is not deleted") - cmdRestore := NewCreateRestoreCmd(tf, streams) - Expect(cmdRestore != nil).To(BeTrue()) - _ = cmdRestore.Flags().Set("restore-to-time", util.TimeFormatWithDuration(&now, time.Second)) - _ = cmdRestore.Flags().Set("source-cluster", clusterName) - cmdRestore.Run(nil, []string{}) - - // test with RFC3339 format - _ = cmdRestore.Flags().Set("restore-to-time", now.Format(time.RFC3339)) - _ = cmdRestore.Flags().Set("source-cluster", clusterName) - cmdRestore.Run(nil, []string{"new-cluster"}) - - By("restore should be failed when backups belong to different source clusters") - o := &CreateRestoreOptions{CreateOptions: create.CreateOptions{ - IOStreams: streams, - Factory: tf, - }} - restoreTime := time.Now().Add(90 * time.Second) - o.RestoreTimeStr = util.TimeFormatWithDuration(&metav1.Time{Time: restoreTime}, time.Second) - o.SourceCluster = clusterName - Expect(o.Complete()).Should(Succeed()) - Expect(o.validateRestoreTime().Error()).Should(ContainSubstring("restore-to-time is out of time range")) - }) + // It("restore-to-time", func() { + // timestamp := time.Now().Format("20060102150405") + // backupName := "backup-test-" + timestamp + // backupName1 := backupName + "1" + // clusterName := "source-cluster-" + timestamp + // secrets := testing.FakeSecrets(testing.Namespace, clusterName) + // 
clusterDef := testing.FakeClusterDef() + // cluster := testing.FakeCluster(clusterName, testing.Namespace) + // clusterDefLabel := map[string]string{ + // constant.ClusterDefLabelKey: clusterDef.Name, + // } + // cluster.SetLabels(clusterDefLabel) + // backupPolicy := testing.FakeBackupPolicy("backPolicy", cluster.Name) + // backupTypeMeta := testing.FakeBackup("backup-none").TypeMeta + // backupLabels := map[string]string{ + // constant.AppInstanceLabelKey: clusterName, + // constant.KBAppComponentLabelKey: "test", + // dptypes.DataProtectionLabelClusterUIDKey: string(cluster.UID), + // } + // now := metav1.Now() + // baseBackup := testapps.NewBackupFactory(testing.Namespace, "backup-base"). + // SetBackupMethod(dpv1alpha1.BackupTypeSnapshot). + // SetBackupTimeRange(now.Add(-time.Minute), now.Add(-time.Second)). + // SetLabels(backupLabels).GetObject() + // baseBackup.TypeMeta = backupTypeMeta + // baseBackup.Status.Phase = dpv1alpha1.BackupPhaseCompleted + // logfileBackup := testapps.NewBackupFactory(testing.Namespace, backupName). + // SetBackupMethod(dpv1alpha1.BackupTypeLogFile). + // SetBackupTimeRange(now.Add(-time.Minute), now.Add(time.Minute)). + // SetLabels(backupLabels).GetObject() + // logfileBackup.TypeMeta = backupTypeMeta + // + // logfileBackup1 := testapps.NewBackupFactory(testing.Namespace, backupName1). + // SetBackupMethod(dpv1alpha1.BackupTypeLogFile). 
+ // SetBackupTimeRange(now.Add(-time.Minute), now.Add(2*time.Minute)).GetObject() + // uid := string(cluster.UID) + // logfileBackup1.Labels = map[string]string{ + // constant.AppInstanceLabelKey: clusterName, + // constant.KBAppComponentLabelKey: "test", + // constant.DataProtectionLabelClusterUIDKey: uid[:30] + "00", + // } + // logfileBackup1.TypeMeta = backupTypeMeta + // + // pods := testing.FakePods(1, testing.Namespace, clusterName) + // tf.FakeDynamicClient = fake.NewSimpleDynamicClient( + // scheme.Scheme, &secrets.Items[0], &pods.Items[0], cluster, backupPolicy, baseBackup, logfileBackup, logfileBackup1) + // tf.Client = &clientfake.RESTClient{} + // + // By("restore new cluster from source cluster which is not deleted") + // cmdRestore := NewCreateRestoreCmd(tf, streams) + // Expect(cmdRestore != nil).To(BeTrue()) + // _ = cmdRestore.Flags().Set("restore-to-time", util.TimeFormatWithDuration(&now, time.Second)) + // _ = cmdRestore.Flags().Set("source-cluster", clusterName) + // cmdRestore.Run(nil, []string{}) + // + // // test with RFC3339 format + // _ = cmdRestore.Flags().Set("restore-to-time", now.Format(time.RFC3339)) + // _ = cmdRestore.Flags().Set("source-cluster", clusterName) + // cmdRestore.Run(nil, []string{"new-cluster"}) + // + // By("restore should be failed when backups belong to different source clusters") + // o := &CreateRestoreOptions{CreateOptions: create.CreateOptions{ + // IOStreams: streams, + // Factory: tf, + // }} + // restoreTime := time.Now().Add(90 * time.Second) + // o.RestoreTimeStr = util.TimeFormatWithDuration(&metav1.Time{Time: restoreTime}, time.Second) + // o.SourceCluster = clusterName + // Expect(o.Complete()).Should(Succeed()) + // Expect(o.validateRestoreTime().Error()).Should(ContainSubstring("restore-to-time is out of time range")) + // }) It("describe-backup", func() { cmd := NewDescribeBackupCmd(tf, streams) @@ -404,21 +381,12 @@ var _ = Describe("DataProtection", func() { backupName := "test1" backup1 := 
testing.FakeBackup(backupName) args = append(args, backupName) - availableReplicas := int32(1) - backup1.Status.Phase = dpv1alpha1.BackupCompleted + backup1.Status.Phase = dpv1alpha1.BackupPhaseCompleted logNow := metav1.Now() backup1.Status.StartTimestamp = &logNow backup1.Status.CompletionTimestamp = &logNow backup1.Status.Expiration = &logNow backup1.Status.Duration = &metav1.Duration{Duration: logNow.Sub(logNow.Time)} - backup1.Status.AvailableReplicas = &availableReplicas - backup1.Status.Manifests = &dpv1alpha1.ManifestsStatus{ - BackupLog: &dpv1alpha1.BackupLogStatus{StartTime: &logNow, StopTime: &logNow}, - BackupTool: &dpv1alpha1.BackupToolManifestsStatus{FilePath: "/backupdata/test1"}, - Snapshot: &dpv1alpha1.BackupSnapshotStatus{VolumeSnapshotName: backupName}, - UserContext: map[string]string{"user_define_key": "user_define_value"}, - } - backup1.Status.SourceCluster = "mycluster" tf.FakeDynamicClient = testing.FakeDynamicClient(backup1) Expect(o.Complete(args)).Should(Succeed()) o.client = testing.FakeClientSet() @@ -426,13 +394,13 @@ var _ = Describe("DataProtection", func() { }) }) -func mockBackupInfo(dynamic dynamic.Interface, backupName, clusterName string, manifests map[string]any, backupType string) { +func mockBackupInfo(dynamic dynamic.Interface, backupName, clusterName string, timeRange map[string]any, backupMethod string) { clusterString := fmt.Sprintf(`{"metadata":{"name":"deleted-cluster","namespace":"%s"},"spec":{"clusterDefinitionRef":"apecloud-mysql","clusterVersionRef":"ac-mysql-8.0.30","componentSpecs":[{"name":"mysql","componentDefRef":"mysql","replicas":1}]}}`, testing.Namespace) backupStatus := &unstructured.Unstructured{ Object: map[string]any{ "status": map[string]any{ "phase": "Completed", - "manifests": manifests, + "timeRange": timeRange, }, "metadata": map[string]any{ "name": backupName, @@ -445,7 +413,7 @@ func mockBackupInfo(dynamic dynamic.Interface, backupName, clusterName string, m }, }, "spec": map[string]any{ - 
"backupType": backupType, + "backupMethod": backupMethod, }, }, } diff --git a/internal/cli/cmd/cluster/describe.go b/internal/cli/cmd/cluster/describe.go index 8f4e1e3bd3f..c324e689fbb 100644 --- a/internal/cli/cmd/cluster/describe.go +++ b/internal/cli/cmd/cluster/describe.go @@ -23,11 +23,9 @@ import ( "fmt" "io" "strings" - "time" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/dynamic" @@ -41,7 +39,7 @@ import ( "github.com/apecloud/kubeblocks/internal/cli/printer" "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" - "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) var ( @@ -233,60 +231,60 @@ func showDataProtection(backupPolicies []dpv1alpha1.BackupPolicy, backups []dpv1 } tbl := newTbl(out, "\nData Protection:", "AUTO-BACKUP", "BACKUP-SCHEDULE", "TYPE", "BACKUP-TTL", "LAST-SCHEDULE", "RECOVERABLE-TIME") for _, policy := range backupPolicies { - if policy.Annotations[constant.DefaultBackupPolicyAnnotationKey] != "true" { + if policy.Annotations[dptypes.DefaultBackupPolicyAnnotationKey] != "true" { continue } - if policy.Status.Phase != dpv1alpha1.PolicyAvailable { + if policy.Status.Phase != dpv1alpha1.AvailablePhase { continue } - ttlString := printer.NoneString - backupSchedule := printer.NoneString - backupType := printer.NoneString - scheduleEnable := "Disabled" - if policy.Spec.Schedule.Snapshot != nil { - if policy.Spec.Schedule.Snapshot.Enable { - scheduleEnable = "Enabled" - backupSchedule = policy.Spec.Schedule.Snapshot.CronExpression - backupType = string(dpv1alpha1.BackupTypeSnapshot) - } - } - if policy.Spec.Schedule.Datafile != nil { - if policy.Spec.Schedule.Datafile.Enable { - scheduleEnable = "Enabled" - backupSchedule = policy.Spec.Schedule.Datafile.CronExpression 
- backupType = string(dpv1alpha1.BackupTypeDataFile) - } - } - if policy.Spec.Retention != nil && policy.Spec.Retention.TTL != nil { - ttlString = *policy.Spec.Retention.TTL - } - lastScheduleTime := printer.NoneString - if policy.Status.LastScheduleTime != nil { - lastScheduleTime = util.TimeFormat(policy.Status.LastScheduleTime) - } - tbl.AddRow(scheduleEnable, backupSchedule, backupType, ttlString, lastScheduleTime, getBackupRecoverableTime(backups)) + // ttlString := printer.NoneString + // backupSchedule := printer.NoneString + // backupType := printer.NoneString + // scheduleEnable := "Disabled" + // if policy.Spec.SchedulePolicy.Snapshot != nil { + // if policy.Spec.SchedulePolicy.Snapshot.Enable { + // scheduleEnable = "Enabled" + // backupSchedule = policy.Spec.SchedulePolicy.Snapshot.CronExpression + // backupType = string(dpv1alpha1.BackupTypeSnapshot) + // } + // } + // if policy.Spec.SchedulePolicy.Datafile != nil { + // if policy.Spec.SchedulePolicy.Datafile.Enable { + // scheduleEnable = "Enabled" + // backupSchedule = policy.Spec.SchedulePolicy.Datafile.CronExpression + // backupType = string(dpv1alpha1.BackupTypeDataFile) + // } + // } + // if policy.Spec.Retention != nil && policy.Spec.Retention.TTL != nil { + // ttlString = *policy.Spec.Retention.TTL + // } + // lastScheduleTime := printer.NoneString + // if policy.Status.LastScheduleTime != nil { + // lastScheduleTime = util.TimeFormat(policy.Status.LastScheduleTime) + // } + // tbl.AddRow(scheduleEnable, backupSchedule, backupType, ttlString, lastScheduleTime, getBackupRecoverableTime(backups)) } tbl.Print() } -// getBackupRecoverableTime returns the recoverable time range string -func getBackupRecoverableTime(backups []dpv1alpha1.Backup) string { - recoverabelTime := dpv1alpha1.GetRecoverableTimeRange(backups) - var result string - for _, i := range recoverabelTime { - result = addTimeRange(result, i.StartTime, i.StopTime) - } - if result == "" { - return printer.NoneString - } - return result 
-} - -func addTimeRange(result string, start, end *metav1.Time) string { - if result != "" { - result += ", " - } - result += fmt.Sprintf("%s ~ %s", util.TimeFormatWithDuration(start, time.Second), - util.TimeFormatWithDuration(end, time.Second)) - return result -} +// getBackupRecoverableTime returns the recoverable time range string +// func getBackupRecoverableTime(backups []dpv1alpha1.Backup) string { +// recoverabelTime := dpv1alpha1.GetRecoverableTimeRange(backups) +// var result string +// for _, i := range recoverabelTime { +// result = addTimeRange(result, i.StartTime, i.StopTime) +// } +// if result == "" { +// return printer.NoneString +// } +// return result +// } + +// func addTimeRange(result string, start, end *metav1.Time) string { +// if result != "" { +// result += ", " +// } +// result += fmt.Sprintf("%s ~ %s", util.TimeFormatWithDuration(start, time.Second), +// util.TimeFormatWithDuration(end, time.Second)) +// return result +// } diff --git a/internal/cli/cmd/cluster/describe_test.go b/internal/cli/cmd/cluster/describe_test.go index 584d0b458ce..63ffe53d389 100644 --- a/internal/cli/cmd/cluster/describe_test.go +++ b/internal/cli/cmd/cluster/describe_test.go @@ -124,22 +124,18 @@ var _ = Describe("Expose", func() { } now := metav1.Now() fakeBackups[0].Status = dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupCompleted, - Manifests: &dpv1alpha1.ManifestsStatus{ - BackupLog: &dpv1alpha1.BackupLogStatus{ - StartTime: &now, - StopTime: &now, - }, + Phase: dpv1alpha1.BackupPhaseCompleted, + TimeRange: &dpv1alpha1.BackupTimeRange{ + Start: &now, + End: &now, }, } after := metav1.Time{Time: now.Add(time.Hour)} fakeBackups[1].Status = dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupCompleted, - Manifests: &dpv1alpha1.ManifestsStatus{ - BackupLog: &dpv1alpha1.BackupLogStatus{ - StartTime: &now, - StopTime: &after, - }, + Phase: dpv1alpha1.BackupPhaseCompleted, + TimeRange: &dpv1alpha1.BackupTimeRange{ + Start: &now, + End: &after, }, } 
showDataProtection(fakeBackupPolicies, fakeBackups, out) diff --git a/internal/cli/cmd/cluster/update.go b/internal/cli/cmd/cluster/update.go index aeb548fc155..f8c0e10d263 100644 --- a/internal/cli/cmd/cluster/update.go +++ b/internal/cli/cmd/cluster/update.go @@ -43,7 +43,7 @@ import ( "k8s.io/kubectl/pkg/util/templates" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/internal/cli/cluster" "github.com/apecloud/kubeblocks/internal/cli/patch" "github.com/apecloud/kubeblocks/internal/cli/types" @@ -547,23 +547,19 @@ func (o *updateOptions) updateBackupRetentionPeriod(val string) error { return nil } - // judge whether val end with the 'd'|'D'|'h'|'H' character + // judge whether val end with the 'd'|'h' character lastChar := val[len(val)-1] - if lastChar != 'd' && lastChar != 'D' && lastChar != 'h' && lastChar != 'H' { - return fmt.Errorf("invalid retention period: %s, only support d|D|h|H", val) + if lastChar != 'd' && lastChar != 'h' { + return fmt.Errorf("invalid retention period: %s, only support d|h", val) } - o.cluster.Spec.Backup.RetentionPeriod = &val + o.cluster.Spec.Backup.RetentionPeriod = dpv1alpha1.RetentionPeriod(val) return nil } func (o *updateOptions) updateBackupMethod(val string) error { - method := dataprotectionv1alpha1.BackupMethod(val) - if method != dataprotectionv1alpha1.BackupMethodSnapshot && method != dataprotectionv1alpha1.BackupMethodBackupTool { - return fmt.Errorf("invalid backup method: %s, only support %s and %s", val, - dataprotectionv1alpha1.BackupMethodSnapshot, dataprotectionv1alpha1.BackupMethodBackupTool) - } - o.cluster.Spec.Backup.Method = dataprotectionv1alpha1.BackupMethod(val) + // TODO(ldm): validate backup method are defined in the backup policy. 
+ o.cluster.Spec.Backup.Method = val return nil } diff --git a/internal/cli/cmd/kubeblocks/kubeblocks_objects_test.go b/internal/cli/cmd/kubeblocks/kubeblocks_objects_test.go index abebc2baf54..1b2ca9f6c40 100644 --- a/internal/cli/cmd/kubeblocks/kubeblocks_objects_test.go +++ b/internal/cli/cmd/kubeblocks/kubeblocks_objects_test.go @@ -75,35 +75,35 @@ var _ = Describe("kubeblocks objects", func() { clusterDef.Finalizers = []string{"test"} clusterVersion := testing.FakeClusterVersion() clusterVersion.Finalizers = []string{"test"} - backupTool := testing.FakeBackupTool() - backupTool.Finalizers = []string{"test"} + actionSet := testing.FakeActionSet() + actionSet.Finalizers = []string{"test"} testCases := []struct { clusterDef *appsv1alpha1.ClusterDefinition clusterVersion *appsv1alpha1.ClusterVersion - backupTool *dpv1alpha1.BackupTool + actionSet *dpv1alpha1.ActionSet }{ { clusterDef: testing.FakeClusterDef(), clusterVersion: testing.FakeClusterVersion(), - backupTool: testing.FakeBackupTool(), + actionSet: testing.FakeActionSet(), }, { clusterDef: clusterDef, clusterVersion: testing.FakeClusterVersion(), - backupTool: testing.FakeBackupTool(), + actionSet: testing.FakeActionSet(), }, { clusterDef: clusterDef, clusterVersion: clusterVersion, - backupTool: backupTool, + actionSet: actionSet, }, } for _, c := range testCases { objects := mockCRD() objects = append(objects, testing.FakeVolumeSnapshotClass()) - objects = append(objects, c.clusterDef, c.clusterVersion, c.backupTool) + objects = append(objects, c.clusterDef, c.clusterVersion, c.actionSet) client := testing.FakeDynamicClient(objects...) 
objs, _ := getKBObjects(client, "", nil) Expect(removeCustomResources(client, objs)).Should(Succeed()) @@ -135,22 +135,22 @@ var _ = Describe("kubeblocks objects", func() { Expect(objs[types.CRDGVR()].Items).Should(HaveLen(4)) // verify crs for _, gvr := range []schema.GroupVersionResource{types.ClusterDefGVR(), types.ClusterVersionGVR()} { - objlist, ok := objs[gvr] + objList, ok := objs[gvr] Expect(ok).Should(BeTrue()) - Expect(objlist.Items).Should(HaveLen(1)) + Expect(objList.Items).Should(HaveLen(1)) } // verify rbac info for _, gvr := range []schema.GroupVersionResource{types.RoleGVR(), types.ClusterRoleBindingGVR(), types.ServiceAccountGVR()} { - objlist, ok := objs[gvr] + objList, ok := objs[gvr] Expect(ok).Should(BeTrue()) - Expect(objlist.Items).Should(HaveLen(1), gvr.String()) + Expect(objList.Items).Should(HaveLen(1), gvr.String()) } - // verify cofnig tpl + // verify config tpl for _, gvr := range []schema.GroupVersionResource{types.ConfigmapGVR()} { - objlist, ok := objs[gvr] + objList, ok := objs[gvr] Expect(ok).Should(BeTrue()) - Expect(objlist.Items).Should(HaveLen(1), gvr.String()) + Expect(objList.Items).Should(HaveLen(1), gvr.String()) } }) }) @@ -200,20 +200,20 @@ func mockCRD() []runtime.Object { Status: v1.CustomResourceDefinitionStatus{}, } - backupToolCRD := v1.CustomResourceDefinition{ + actionSetCRD := v1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ Kind: "CustomResourceDefinition", APIVersion: "apiextensions.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "backuptools.dataprotection.kubeblocks.io", + Name: "actionsets.dataprotection.kubeblocks.io", }, Spec: v1.CustomResourceDefinitionSpec{ Group: types.DPAPIGroup, }, Status: v1.CustomResourceDefinitionStatus{}, } - return []runtime.Object{&clusterCRD, &clusterDefCRD, &clusterVersionCRD, &backupToolCRD} + return []runtime.Object{&clusterCRD, &clusterDefCRD, &clusterVersionCRD, &actionSetCRD} } func mockCRs() []runtime.Object { diff --git 
a/internal/cli/cmd/kubeblocks/status.go b/internal/cli/cmd/kubeblocks/status.go index 33da9bc955e..526d37eeac8 100644 --- a/internal/cli/cmd/kubeblocks/status.go +++ b/internal/cli/cmd/kubeblocks/status.go @@ -70,7 +70,7 @@ var ( } kubeBlocksGlobalCustomResources = []schema.GroupVersionResource{ - types.BackupToolGVR(), + types.ActionSetGVR(), types.ClusterDefGVR(), types.ClusterVersionGVR(), types.ConfigConstraintGVR(), diff --git a/internal/cli/cmd/report/report.go b/internal/cli/cmd/report/report.go index 183765e4949..48c20c18eae 100644 --- a/internal/cli/cmd/report/report.go +++ b/internal/cli/cmd/report/report.go @@ -33,7 +33,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" - klog "k8s.io/klog/v2" + "k8s.io/klog/v2" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util" "k8s.io/kubectl/pkg/util/i18n" @@ -492,8 +492,9 @@ func (o *reportClusterOptions) handleManifests(ctx context.Context) error { types.RoleBindingGVR(), types.BackupGVR(), types.BackupPolicyGVR(), - types.BackupToolGVR(), - types.RestoreJobGVR(), + types.BackupScheduleGVR(), + types.ActionSetGVR(), + types.RestoreGVR(), types.PVCGVR(), } globalGvrs = []schema.GroupVersionResource{ diff --git a/internal/cli/create/template/backup_template.cue b/internal/cli/create/template/backup_template.cue index 85d37eabf71..5d0b80f9d56 100644 --- a/internal/cli/create/template/backup_template.cue +++ b/internal/cli/create/template/backup_template.cue @@ -19,7 +19,7 @@ options: { backupName: string namespace: string - backupType: string + backupMethod: string backupPolicy: string } @@ -35,7 +35,7 @@ content: { } } spec: { - backupType: options.backupType + backupMethod: options.backupMethod backupPolicyName: options.backupPolicy } } diff --git a/internal/cli/scheme/install.go b/internal/cli/scheme/install.go index 7105830579b..185e51edf22 100644 --- a/internal/cli/scheme/install.go +++ 
b/internal/cli/scheme/install.go @@ -28,7 +28,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) @@ -37,7 +37,7 @@ func init() { utilruntime.Must(metav1.AddMetaToScheme(Scheme)) utilruntime.Must(clientgoscheme.AddToScheme(Scheme)) utilruntime.Must(appsv1alpha1.AddToScheme(Scheme)) - utilruntime.Must(dataprotectionv1alpha1.AddToScheme(Scheme)) + utilruntime.Must(dpv1alpha1.AddToScheme(Scheme)) utilruntime.Must(snapshotv1.AddToScheme(Scheme)) utilruntime.Must(snapshotv1beta1.AddToScheme(Scheme)) utilruntime.Must(extensionsv1alpha1.AddToScheme(Scheme)) diff --git a/internal/cli/testing/fake.go b/internal/cli/testing/fake.go index 8e98600f3d7..f300935e8a0 100644 --- a/internal/cli/testing/fake.go +++ b/internal/cli/testing/fake.go @@ -44,6 +44,8 @@ import ( storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/constant" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) @@ -62,7 +64,9 @@ const ( KubeBlocksRepoName = "fake-kubeblocks-repo" KubeBlocksChartName = "fake-kubeblocks" KubeBlocksChartURL = "fake-kubeblocks-chart-url" - BackupToolName = "fake-backup-tool" + BackupMethodName = "fake-backup-method" + ActionSetName = "fake-action-set" + BackupName = "fake-backup-name" IsDefault = true IsNotDefault = false @@ -356,14 +360,13 @@ func FakeClusterVersion() *appsv1alpha1.ClusterVersion { return cv } -func 
FakeBackupTool() *dpv1alpha1.BackupTool { - tool := &dpv1alpha1.BackupTool{} - tool.Name = BackupToolName - return tool +func FakeActionSet() *dpv1alpha1.ActionSet { + as := &dpv1alpha1.ActionSet{} + as.Name = ActionSetName + return as } func FakeBackupPolicy(backupPolicyName, clusterName string) *dpv1alpha1.BackupPolicy { - ttl := "7d" template := &dpv1alpha1.BackupPolicy{ TypeMeta: metav1.TypeMeta{ APIVersion: fmt.Sprintf("%s/%s", types.DPAPIGroup, types.DPAPIVersion), @@ -376,51 +379,30 @@ func FakeBackupPolicy(backupPolicyName, clusterName string) *dpv1alpha1.BackupPo constant.AppInstanceLabelKey: clusterName, }, Annotations: map[string]string{ - constant.DefaultBackupPolicyAnnotationKey: "true", + dptypes.DefaultBackupPolicyAnnotationKey: "true", }, }, Spec: dpv1alpha1.BackupPolicySpec{ - Snapshot: &dpv1alpha1.SnapshotPolicy{ - BasePolicy: dpv1alpha1.BasePolicy{ - BackupsHistoryLimit: 1, - }, - }, - Datafile: &dpv1alpha1.CommonBackupPolicy{ - BasePolicy: dpv1alpha1.BasePolicy{ - BackupsHistoryLimit: 1, - }, - PersistentVolumeClaim: dpv1alpha1.PersistentVolumeClaim{ - Name: pointer.String("test1"), - }, - }, - Logfile: &dpv1alpha1.CommonBackupPolicy{ - BasePolicy: dpv1alpha1.BasePolicy{ - BackupsHistoryLimit: 1, - }, - PersistentVolumeClaim: dpv1alpha1.PersistentVolumeClaim{ - Name: pointer.String("test1"), + BackupMethods: []dpv1alpha1.BackupMethod{ + { + Name: BackupMethodName, + SnapshotVolumes: boolptr.False(), + ActionSetName: ActionSetName, }, }, - Schedule: dpv1alpha1.Schedule{ - Snapshot: &dpv1alpha1.SchedulePolicy{ - Enable: false, - CronExpression: "0 18 * * *", - }, - Datafile: &dpv1alpha1.SchedulePolicy{ - Enable: false, - CronExpression: "0 18 * * *", - }, - Logfile: &dpv1alpha1.SchedulePolicy{ - Enable: false, - CronExpression: "* */1 * * *", + Target: &dpv1alpha1.BackupTarget{ + PodSelector: &dpv1alpha1.PodSelector{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + constant.AppInstanceLabelKey: ClusterName, + 
constant.KBAppComponentLabelKey: ComponentName, + constant.AppManagedByLabelKey: constant.AppName}, + }, }, }, - Retention: &dpv1alpha1.RetentionSpec{ - TTL: &ttl, - }, }, Status: dpv1alpha1.BackupPolicyStatus{ - Phase: dpv1alpha1.PolicyAvailable, + Phase: dpv1alpha1.AvailablePhase, }, } return template @@ -451,8 +433,8 @@ func FakeBackupWithCluster(cluster *appsv1alpha1.Cluster, backupName string) *dp Name: backupName, Namespace: Namespace, Labels: map[string]string{ - constant.AppInstanceLabelKey: cluster.Name, - constant.DataProtectionLabelClusterUIDKey: string(cluster.UID), + constant.AppInstanceLabelKey: cluster.Name, + dptypes.DataProtectionLabelClusterUIDKey: string(cluster.UID), }, }, } @@ -1025,7 +1007,7 @@ func FakeBackupRepo(name string, isDefault bool) *dpv1alpha1.BackupRepo { } if isDefault { backupRepo.Annotations = map[string]string{ - constant.DefaultBackupRepoAnnotationKey: "true", + dptypes.DefaultBackupRepoAnnotationKey: "true", } } return backupRepo diff --git a/internal/cli/types/types.go b/internal/cli/types/types.go index 328b1b91a0d..d392d9702b9 100644 --- a/internal/cli/types/types.go +++ b/internal/cli/types/types.go @@ -103,7 +103,7 @@ const ( KindClusterVersion = "ClusterVersion" KindConfigConstraint = "ConfigConstraint" KindBackup = "Backup" - KindRestoreJob = "RestoreJob" + KindRestore = "Restore" KindBackupPolicy = "BackupPolicy" KindOps = "OpsRequest" ) @@ -132,13 +132,14 @@ const ( // DataProtection API group const ( - DPAPIGroup = "dataprotection.kubeblocks.io" - DPAPIVersion = "v1alpha1" - ResourceBackups = "backups" - ResourceBackupTools = "backuptools" - ResourceRestoreJobs = "restorejobs" - ResourceBackupPolicies = "backuppolicies" - ResourceBackupRepos = "backuprepos" + DPAPIGroup = "dataprotection.kubeblocks.io" + DPAPIVersion = "v1alpha1" + ResourceBackups = "backups" + ResourceActionSets = "actionsets" + ResourceRestores = "restores" + ResourceBackupPolicies = "backuppolicies" + ResourceBackupRepos = "backuprepos" + 
ResourceBackupSchedules = "backupschedules" ) // Extensions API group @@ -251,16 +252,20 @@ func BackupPolicyGVR() schema.GroupVersionResource { return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceBackupPolicies} } -func BackupToolGVR() schema.GroupVersionResource { - return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceBackupTools} +func BackupScheduleGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceBackupSchedules} +} + +func ActionSetGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceActionSets} } func BackupRepoGVR() schema.GroupVersionResource { return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceBackupRepos} } -func RestoreJobGVR() schema.GroupVersionResource { - return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceRestoreJobs} +func RestoreGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: DPAPIGroup, Version: DPAPIVersion, Resource: ResourceRestores} } func AddonGVR() schema.GroupVersionResource { diff --git a/internal/cli/util/helm/helm.go b/internal/cli/util/helm/helm.go index a2ee2dde334..d0fead1c0de 100644 --- a/internal/cli/util/helm/helm.go +++ b/internal/cli/util/helm/helm.go @@ -182,7 +182,7 @@ func (i *InstallOpts) Install(cfg *Config) (*release.Release, error) { } var rel *release.Release - if err := retry.IfNecessary(ctx, func() error { + if err = retry.IfNecessary(ctx, func() error { release, err1 := i.tryInstall(actionCfg) if err1 != nil { return err1 @@ -212,7 +212,8 @@ func (i *InstallOpts) tryInstall(cfg *action.Configuration) (*release.Release, e // If a release does not exist, install it. 
histClient := action.NewHistory(cfg) histClient.Max = 1 - if _, err := histClient.Run(i.Name); err != nil && err != driver.ErrReleaseNotFound { + if _, err := histClient.Run(i.Name); err != nil && + !errors.Is(err, driver.ErrReleaseNotFound) { return nil, err } diff --git a/internal/constant/const.go b/internal/constant/const.go index 8ba749717ab..efc2de22fbc 100644 --- a/internal/constant/const.go +++ b/internal/constant/const.go @@ -111,22 +111,19 @@ const ( CMConfigurationConstraintsNameLabelKey = "config.kubeblocks.io/config-constraints-name" CMConfigurationTemplateVersion = "config.kubeblocks.io/config-template-version" ConsensusSetAccessModeLabelKey = "cs.apps.kubeblocks.io/access-mode" - BackupTypeLabelKeyKey = "dataprotection.kubeblocks.io/backup-type" - DataProtectionLabelBackupNameKey = "dataprotection.kubeblocks.io/backup-name" AddonNameLabelKey = "extensions.kubeblocks.io/addon-name" OpsRequestTypeLabelKey = "ops.kubeblocks.io/ops-type" OpsRequestNameLabelKey = "ops.kubeblocks.io/ops-name" ServiceDescriptorNameLabelKey = "servicedescriptor.kubeblocks.io/name" + RestoreForHScaleLabelKey = "apps.kubeblocks.io/restore-for-hscale" // kubeblocks.io annotations - ClusterSnapshotAnnotationKey = "kubeblocks.io/cluster-snapshot" // ClusterSnapshotAnnotationKey saves the snapshot of cluster. - DefaultClusterVersionAnnotationKey = "kubeblocks.io/is-default-cluster-version" // DefaultClusterVersionAnnotationKey specifies the default cluster version. - OpsRequestAnnotationKey = "kubeblocks.io/ops-request" // OpsRequestAnnotationKey OpsRequest annotation key in Cluster - ReconcileAnnotationKey = "kubeblocks.io/reconcile" // ReconcileAnnotationKey Notify k8s object to reconcile - RestartAnnotationKey = "kubeblocks.io/restart" // RestartAnnotationKey the annotation which notices the StatefulSet/DeploySet to restart - RestoreFromTimeAnnotationKey = "kubeblocks.io/restore-from-time" // RestoreFromTimeAnnotationKey specifies the time to recover from the backup. 
- RestoreFromSrcClusterAnnotationKey = "kubeblocks.io/restore-from-source-cluster" // RestoreFromSrcClusterAnnotationKey specifies the source cluster to recover from the backup. - RestoreFromBackUpAnnotationKey = "kubeblocks.io/restore-from-backup" // RestoreFromBackUpAnnotationKey specifies the component to recover from the backup. + ClusterSnapshotAnnotationKey = "kubeblocks.io/cluster-snapshot" // ClusterSnapshotAnnotationKey saves the snapshot of cluster. + DefaultClusterVersionAnnotationKey = "kubeblocks.io/is-default-cluster-version" // DefaultClusterVersionAnnotationKey specifies the default cluster version. + OpsRequestAnnotationKey = "kubeblocks.io/ops-request" // OpsRequestAnnotationKey OpsRequest annotation key in Cluster + ReconcileAnnotationKey = "kubeblocks.io/reconcile" // ReconcileAnnotationKey Notify k8s object to reconcile + RestartAnnotationKey = "kubeblocks.io/restart" // RestartAnnotationKey the annotation which notices the StatefulSet/DeploySet to restart + RestoreFromBackupAnnotationKey = "kubeblocks.io/restore-from-backup" // RestoreFromBackupAnnotationKey specifies the component to recover from the backup. SnapShotForStartAnnotationKey = "kubeblocks.io/snapshot-for-start" ComponentReplicasAnnotationKey = "apps.kubeblocks.io/component-replicas" // ComponentReplicasAnnotationKey specifies the number of pods in replicas BackupPolicyTemplateAnnotationKey = "apps.kubeblocks.io/backup-policy-template" @@ -136,12 +133,6 @@ const ( HaltRecoveryAllowInconsistentResAnnotKey = "clusters.apps.kubeblocks.io/allow-inconsistent-resource" LeaderAnnotationKey = "cs.apps.kubeblocks.io/leader" PrimaryAnnotationKey = "rs.apps.kubeblocks.io/primary" - DefaultBackupPolicyAnnotationKey = "dataprotection.kubeblocks.io/is-default-policy" // DefaultBackupPolicyAnnotationKey specifies the default backup policy. 
- DefaultBackupPolicyTemplateAnnotationKey = "dataprotection.kubeblocks.io/is-default-policy-template" // DefaultBackupPolicyTemplateAnnotationKey specifies the default backup policy template. - DefaultBackupRepoAnnotationKey = "dataprotection.kubeblocks.io/is-default-repo" // DefaultBackupRepoAnnotationKey specifies the default backup repo. - BackupDataPathPrefixAnnotationKey = "dataprotection.kubeblocks.io/path-prefix" // BackupDataPathPrefixAnnotationKey specifies the backup data path prefix. - ReconfigureRefAnnotationKey = "dataprotection.kubeblocks.io/reconfigure-ref" - DataProtectionLabelClusterUIDKey = "dataprotection.kubeblocks.io/cluster-uid" DisableUpgradeInsConfigurationAnnotationKey = "config.kubeblocks.io/disable-reconfigure" LastAppliedConfigAnnotationKey = "config.kubeblocks.io/last-applied-configuration" LastAppliedOpsCRAnnotationKey = "config.kubeblocks.io/last-applied-ops-name" @@ -294,29 +285,6 @@ const ( ComponentStatusDefaultPodName = "Unknown" ) -const ( - // dataProtection env names - DPTargetPodName = "DP_TARGET_POD_NAME" - DPDBHost = "DB_HOST" // db host for dataProtection - DPDBUser = "DB_USER" // db user for dataProtection - DPDBPassword = "DB_PASSWORD" // db password for dataProtection - DPBackupDIR = "BACKUP_DIR" // the dest directory for backup data - DPLogFileDIR = "BACKUP_LOGFILE_DIR" // logfile dir - DPBackupName = "BACKUP_NAME" // backup cr name - DPTTL = "TTL" // backup time to live, reference the backupPolicy.spec.retention.ttl - DPLogfileTTL = "LOGFILE_TTL" // ttl for logfile backup, one more day than backupPolicy.spec.retention.ttl - DPLogfileTTLSecond = "LOGFILE_TTL_SECOND" // ttl seconds with LOGFILE_TTL, integer format - DPArchiveInterval = "ARCHIVE_INTERVAL" // archive interval for statefulSet deploy kind, trans from the schedule cronExpression for logfile - DPBackupInfoFile = "BACKUP_INFO_FILE" // the file name which retains the backup.status info - DPTimeFormat = "TIME_FORMAT" // golang time format string - 
DPVolumeDataDIR = "VOLUME_DATA_DIR" // - DPKBRecoveryTime = "KB_RECOVERY_TIME" // recovery time - DPKBRecoveryTimestamp = "KB_RECOVERY_TIMESTAMP" // recovery timestamp - DPBaseBackupStartTime = "BASE_BACKUP_START_TIME" // base backup start time for pitr - DPBaseBackupStartTimestamp = "BASE_BACKUP_START_TIMESTAMP" // base backup start timestamp for pitr - DPBackupStopTime = "BACKUP_STOP_TIME" // backup stop time -) - const ( FeatureGateReplicatedStateMachine = "REPLICATED_STATE_MACHINE" // enable rsm ) @@ -332,3 +300,10 @@ const ( ServiceDescriptorEndpointKey = "endpoint" ServiceDescriptorPortKey = "port" ) + +const ( + BackupNameKeyForRestore = "name" + BackupNamespaceKeyForRestore = "namespace" + VolumeManagementPolicyKeyForRestore = "managementPolicy" + RestoreTimeKeyForRestore = "restoreTime" +) diff --git a/internal/controller/builder/builder_backup.go b/internal/controller/builder/builder_backup.go index 2faecea3375..c334b0ac18d 100644 --- a/internal/controller/builder/builder_backup.go +++ b/internal/controller/builder/builder_backup.go @@ -38,8 +38,8 @@ func (builder *BackupBuilder) SetBackupPolicyName(policyName string) *BackupBuil return builder } -func (builder *BackupBuilder) SetBackType(backupType dataprotection.BackupType) *BackupBuilder { - builder.get().Spec.BackupType = backupType +func (builder *BackupBuilder) SetBackupMethod(method string) *BackupBuilder { + builder.get().Spec.BackupMethod = method return builder } diff --git a/internal/controller/builder/builder_backup_test.go b/internal/controller/builder/builder_backup_test.go index 4c8b214fd1c..e0d2b9ff46f 100644 --- a/internal/controller/builder/builder_backup_test.go +++ b/internal/controller/builder/builder_backup_test.go @@ -22,8 +22,6 @@ package builder import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - dataprotection "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" ) var _ = Describe("backup builder", func() { @@ -33,18 +31,15 @@ var _ = Describe("backup builder", func() { ns = "default" ) policyName := "policyName" - backupType := dataprotection.BackupTypeSnapshot - parent := "parent" + backupMethod := "backupMethodName" backup := NewBackupBuilder(ns, name). SetBackupPolicyName(policyName). - SetBackType(backupType). - SetParentBackupName(parent). + SetBackupMethod(backupMethod). GetObject() Expect(backup.Name).Should(Equal(name)) Expect(backup.Namespace).Should(Equal(ns)) Expect(backup.Spec.BackupPolicyName).Should(Equal(policyName)) - Expect(backup.Spec.BackupType).Should(Equal(backupType)) - Expect(backup.Spec.ParentBackupName).Should(Equal(parent)) + Expect(backup.Spec.BackupMethod).Should(Equal(backupMethod)) }) }) diff --git a/internal/controller/component/affinity_utils.go b/internal/controller/component/affinity_utils.go index 90a2886695a..0a787a859b9 100644 --- a/internal/controller/component/affinity_utils.go +++ b/internal/controller/component/affinity_utils.go @@ -31,7 +31,7 @@ import ( viper "github.com/apecloud/kubeblocks/internal/viperx" ) -func buildPodTopologySpreadConstraints( +func BuildPodTopologySpreadConstraints( cluster *appsv1alpha1.Cluster, clusterOrCompAffinity *appsv1alpha1.Affinity, component *SynthesizedComponent, diff --git a/internal/controller/component/affinity_utils_test.go b/internal/controller/component/affinity_utils_test.go index ae5a87d5423..08086d270f0 100644 --- a/internal/controller/component/affinity_utils_test.go +++ b/internal/controller/component/affinity_utils_test.go @@ -114,7 +114,7 @@ var _ = Describe("affinity utils", func() { Expect(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution).Should(BeEmpty()) Expect(affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution).Should(BeEmpty()) - topologySpreadConstraints := 
buildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) + topologySpreadConstraints := BuildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) Expect(topologySpreadConstraints[0].WhenUnsatisfiable).Should(Equal(corev1.DoNotSchedule)) Expect(topologySpreadConstraints[0].TopologyKey).Should(Equal(topologyKey)) }) @@ -131,7 +131,7 @@ var _ = Describe("affinity utils", func() { Expect(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution).Should(BeEmpty()) Expect(affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Key).Should(Equal(nodeKey)) - topologySpreadConstraints := buildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) + topologySpreadConstraints := BuildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) Expect(topologySpreadConstraints[0].WhenUnsatisfiable).Should(Equal(corev1.DoNotSchedule)) Expect(topologySpreadConstraints[0].TopologyKey).Should(Equal(topologyKey)) }) @@ -175,7 +175,7 @@ var _ = Describe("affinity utils", func() { Expect(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight).ShouldNot(BeNil()) Expect(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.TopologyKey).Should(Equal(topologyKey)) - topologySpreadConstraints := buildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) + topologySpreadConstraints := BuildPodTopologySpreadConstraints(clusterObj, clusterObj.Spec.Affinity, component) Expect(topologySpreadConstraints[0].WhenUnsatisfiable).Should(Equal(corev1.ScheduleAnyway)) Expect(topologySpreadConstraints[0].TopologyKey).Should(Equal(topologyKey)) }) diff --git a/internal/controller/component/component.go b/internal/controller/component/component.go index f8a7dc77db0..5a7401f4357 100644 --- a/internal/controller/component/component.go +++ 
b/internal/controller/component/component.go @@ -244,7 +244,7 @@ func buildComponent(reqCtx intctrlutil.RequestCtx, reqCtx.Log.Error(err, "build pod affinity failed.") return nil, err } - component.PodSpec.TopologySpreadConstraints = buildPodTopologySpreadConstraints(cluster, affinity, component) + component.PodSpec.TopologySpreadConstraints = BuildPodTopologySpreadConstraints(cluster, affinity, component) if component.PodSpec.Tolerations, err = BuildTolerations(cluster, clusterCompSpec); err != nil { reqCtx.Log.Error(err, "build pod tolerations failed.") return nil, err diff --git a/internal/controller/component/suite_test.go b/internal/controller/component/suite_test.go index 4e4d91fff33..071691f53c7 100644 --- a/internal/controller/component/suite_test.go +++ b/internal/controller/component/suite_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/testutil" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -99,7 +99,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/configuration/suite_test.go b/internal/controller/configuration/suite_test.go index 236a37cf5cf..42d876b8f5d 100644 --- a/internal/controller/configuration/suite_test.go +++ b/internal/controller/configuration/suite_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - 
dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/testutil" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -100,7 +100,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/factory/builder.go b/internal/controller/factory/builder.go index ea6bc24fce1..72469c2a08f 100644 --- a/internal/controller/factory/builder.go +++ b/internal/controller/factory/builder.go @@ -40,7 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" cfgcm "github.com/apecloud/kubeblocks/internal/configuration/config_manager" "github.com/apecloud/kubeblocks/internal/constant" @@ -48,6 +48,7 @@ import ( "github.com/apecloud/kubeblocks/internal/controller/component" "github.com/apecloud/kubeblocks/internal/controller/rsm" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" ) const ( @@ -196,6 +197,16 @@ func BuildPersistentVolumeClaimLabels(component *component.SynthesizedComponent, } } +func BuildCommonLabels(cluster *appsv1alpha1.Cluster, + component *component.SynthesizedComponent) map[string]string { + return map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppNameLabelKey: 
component.ClusterDefName, + constant.AppInstanceLabelKey: cluster.Name, + constant.KBAppComponentLabelKey: component.Name, + } +} + func BuildSts(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, envConfigName string) (*appsv1.StatefulSet, error) { vctToPVC := func(vct corev1.PersistentVolumeClaimTemplate) corev1.PersistentVolumeClaim { @@ -205,12 +216,7 @@ func BuildSts(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, } } - commonLabels := map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppNameLabelKey: component.ClusterDefName, - constant.AppInstanceLabelKey: cluster.Name, - constant.KBAppComponentLabelKey: component.Name, - } + commonLabels := BuildCommonLabels(cluster, component) podBuilder := builder.NewPodBuilder("", ""). AddLabelsInMap(commonLabels). AddLabels(constant.AppComponentLabelKey, component.CompDefName). @@ -625,7 +631,7 @@ func buildActionFromCharacterType(characterType string, isConsensus bool) []work { Image: "registry.cn-hangzhou.aliyuncs.com/apecloud/mongo:5.0.14", Command: []string{ - "Status=$(export CLIENT=`which mongosh>/dev/null&&echo mongosh||echo mongo`; $CLIENT -u $KB_RSM_USERNAME -p $KB_RSM_PASSWORD 127.0.0.1:27017 --quiet --eval \"JSON.stringify(rs.status())\") &&", + "Status=$(export CLIENT=`which mongosh>/dev/null&&echo mongosh||echo mongo`; $CLIENT -u $KB_RSM_USERNAME -p $KB_RSM_PASSWORD 127.0.0.1:27017 --authenticationDatabase admin --quiet --eval \"JSON.stringify(rs.status())\") &&", "MyState=$(echo $Status | jq '.myState') &&", "echo $Status | jq \".members[] | select(.state == ($MyState | tonumber)) | .stateStr\" |tr '[:upper:]' '[:lower:]' | xargs echo -n", }, @@ -803,17 +809,17 @@ func BuildBackup(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, backupPolicyName string, backupKey types.NamespacedName, - backupType string) *dataprotectionv1alpha1.Backup { + backupMethod string) *dpv1alpha1.Backup { 
return builder.NewBackupBuilder(backupKey.Namespace, backupKey.Name). - AddLabels(constant.BackupTypeLabelKeyKey, backupType). + AddLabels(dptypes.DataProtectionLabelBackupMethodKey, backupMethod). + AddLabels(dptypes.DataProtectionLabelBackupPolicyKey, backupPolicyName). AddLabels(constant.KBManagedByKey, "cluster"). - AddLabels("backuppolicies.dataprotection.kubeblocks.io/name", backupPolicyName). AddLabels(constant.AppNameLabelKey, component.ClusterDefName). AddLabels(constant.AppInstanceLabelKey, cluster.Name). AddLabels(constant.AppManagedByLabelKey, constant.AppName). AddLabels(constant.KBAppComponentLabelKey, component.Name). SetBackupPolicyName(backupPolicyName). - SetBackType(dataprotectionv1alpha1.BackupType(backupType)). + SetBackupMethod(backupMethod). GetObject() } @@ -899,24 +905,6 @@ func BuildCfgManagerContainer(sidecarRenderedParam *cfgcm.CfgManagerBuildParams, return container, nil } -func BuildBackupManifestsJob(key types.NamespacedName, backup *dataprotectionv1alpha1.Backup, podSpec *corev1.PodSpec) *batchv1.Job { - spec := podSpec.DeepCopy() - spec.RestartPolicy = corev1.RestartPolicyNever - ctx := spec.SecurityContext - if ctx == nil { - ctx = &corev1.PodSecurityContext{} - } - user := int64(0) - ctx.RunAsUser = &user - spec.SecurityContext = ctx - return builder.NewJobBuilder(key.Namespace, key.Name). - AddLabels(constant.AppManagedByLabelKey, constant.AppName). - SetPodTemplateSpec(corev1.PodTemplateSpec{Spec: *spec}). - SetBackoffLimit(3). - SetTTLSecondsAfterFinished(10). - GetObject() -} - func BuildRestoreJob(cluster *appsv1alpha1.Cluster, synthesizedComponent *component.SynthesizedComponent, name, image string, command []string, volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, env []corev1.EnvVar, resources *corev1.ResourceRequirements) (*batchv1.Job, error) { containerBuilder := builder.NewContainerBuilder("restore"). 
diff --git a/internal/controller/factory/builder_test.go b/internal/controller/factory/builder_test.go index 5a7cb4526a8..341beba304f 100644 --- a/internal/controller/factory/builder_test.go +++ b/internal/controller/factory/builder_test.go @@ -34,7 +34,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" cfgcm "github.com/apecloud/kubeblocks/internal/configuration/config_manager" "github.com/apecloud/kubeblocks/internal/constant" @@ -449,21 +448,6 @@ var _ = Describe("builder", func() { Expect(*configmap.SecurityContext.RunAsUser).Should(BeEquivalentTo(int64(0))) }) - It("builds backup manifests job correctly", func() { - backup := &dataprotectionv1alpha1.Backup{} - podSpec := &corev1.PodSpec{ - Containers: []corev1.Container{ - { - Command: []string{"sh"}, - }, - }, - } - key := types.NamespacedName{Name: "backup", Namespace: "default"} - job := BuildBackupManifestsJob(key, backup, podSpec) - Expect(job).ShouldNot(BeNil()) - Expect(job.Name).Should(Equal(key.Name)) - }) - It("builds restore job correctly", func() { key := types.NamespacedName{Name: "restore", Namespace: "default"} volumes := []corev1.Volume{} diff --git a/internal/controller/factory/suite_test.go b/internal/controller/factory/suite_test.go index a11f020e4f8..f599feedbe8 100644 --- a/internal/controller/factory/suite_test.go +++ b/internal/controller/factory/suite_test.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" 
"github.com/apecloud/kubeblocks/internal/testutil" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -100,7 +100,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/plan/restore.go b/internal/controller/plan/restore.go index 6c1237ad6f7..1cb521b6f92 100644 --- a/internal/controller/plan/restore.go +++ b/internal/controller/plan/restore.go @@ -21,20 +21,15 @@ package plan import ( "context" - "encoding/json" - "errors" "fmt" - "sort" - "strconv" "strings" - "time" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -56,824 +51,303 @@ type RestoreManager struct { Scheme *k8sruntime.Scheme // private - namespace string - restoreTime *metav1.Time - sourceCluster string -} - -func NewRestoreManager(ctx context.Context, cli client.Client, cluster *appsv1alpha1.Cluster, scheme *k8sruntime.Scheme) *RestoreManager { + namespace string + restoreTime string + volumeManagementPolicy dpv1alpha1.VolumeClaimManagementPolicy + startingIndex int32 + replicas int32 + restoreLabels map[string]string +} + +func NewRestoreManager(ctx context.Context, + cli client.Client, + cluster *appsv1alpha1.Cluster, + scheme *k8sruntime.Scheme, + restoreLabels map[string]string, + replicas, startingIndex int32, +) *RestoreManager { return &RestoreManager{ - Cluster: cluster, - Client: cli, - Ctx: ctx, - Scheme: scheme, + Cluster: cluster, + Client: cli, + Ctx: ctx, + Scheme: scheme, + replicas: 
replicas, + startingIndex: startingIndex, + namespace: cluster.Namespace, + volumeManagementPolicy: dpv1alpha1.ParallelManagementPolicy, + restoreLabels: restoreLabels, } } -const ( - backupVolumePATH = "/backupdata" -) - -// DoRestore prepares restore jobs -func DoRestore(ctx context.Context, cli client.Client, cluster *appsv1alpha1.Cluster, - component *component.SynthesizedComponent, schema *k8sruntime.Scheme) error { - if cluster.Status.ObservedGeneration > 1 { - return nil - } - - mgr := NewRestoreManager(ctx, cli, cluster, schema) - - // check restore from backup - backupObj, err := mgr.getBackupObjectFromAnnotation(component) +func (r *RestoreManager) DoRestore(comp *component.SynthesizedComponent) error { + backupObj, err := r.initFromAnnotation(comp) if err != nil { return err } if backupObj == nil { return nil } - - if err = mgr.createDataPVCs(component, backupObj); err != nil { - return err + if backupObj.Status.BackupMethod == nil { + return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRestoreFailed, `status.backupMethod of backup "%s" can not be empty`, backupObj.Name) } - jobs := make([]client.Object, 0) - if backupObj.Spec.BackupType == dpv1alpha1.BackupTypeDataFile { - restoreJobs, err := mgr.doFullBackupRestore(component, backupObj) - if err != nil { - return err - } - jobs = append(jobs, restoreJobs...) 
+ if err = r.DoPrepareData(comp, backupObj); err != nil { + return err } - - // create and waiting job finished - if err = mgr.createJobsAndWaiting(jobs); err != nil { + if err = r.DoPostReady(comp, backupObj); err != nil { return err } - // do clean up - if err = mgr.cleanupClusterAnnotations(); err != nil { + if err = r.cleanupClusterAnnotations(); err != nil { return err } - return mgr.cleanupJobs(jobs) + return r.cleanupRestores(comp) } -// DoPITR prepares PITR jobs -func DoPITR(ctx context.Context, cli client.Client, cluster *appsv1alpha1.Cluster, - component *component.SynthesizedComponent, schema *k8sruntime.Scheme) error { - if cluster.Status.ObservedGeneration > 1 { - return nil - } - pitrMgr := NewRestoreManager(ctx, cli, cluster, schema) - if need, err := pitrMgr.checkPITRAndInit(component.Name); err != nil { - return err - } else if !need { - return nil - } - - // get the latest base backup from point in time - baseBackup, err := pitrMgr.getLatestBaseBackup(component.Name) - if err != nil { - return err - } - - if err = pitrMgr.createDataPVCs(component, baseBackup); err != nil { - return err - } - - jobs := make([]client.Object, 0) - if baseBackup.Spec.BackupType == dpv1alpha1.BackupTypeDataFile { - restoreJobs, err := pitrMgr.doFullBackupRestore(component, baseBackup) - if err != nil { - return err - } - jobs = append(jobs, restoreJobs...) - } - - continuousJobs, err := pitrMgr.doLogfileBackupRestore(component, baseBackup) +func (r *RestoreManager) DoPrepareData(comp *component.SynthesizedComponent, backupObj *dpv1alpha1.Backup) error { + restore, err := r.BuildPrepareDataRestore(comp, backupObj) if err != nil { return err } - // do clean up - if err = pitrMgr.cleanupClusterAnnotations(); err != nil { - return err - } - jobs = append(jobs, continuousJobs...) 
- return pitrMgr.cleanupJobs(jobs) + return r.createRestoreAndWait(restore) } -func (p *RestoreManager) doFullBackupRestore(component *component.SynthesizedComponent, - backupObj *dpv1alpha1.Backup) ([]client.Object, error) { - backupTool, err := p.getBackupTool(backupObj.Status.BackupToolName) - if err != nil { - return nil, err +func (r *RestoreManager) BuildPrepareDataRestore(comp *component.SynthesizedComponent, backupObj *dpv1alpha1.Backup) (*dpv1alpha1.Restore, error) { + backupMethod := backupObj.Status.BackupMethod + if backupMethod == nil { + return nil, intctrlutil.NewErrorf(intctrlutil.ErrorTypeRestoreFailed, `status.backupMethod of backup "%s" can not be empty`, backupObj.Name) } - var jobs []client.Object - if backupTool.Spec.Physical.GetPhysicalRestoreCommand() != nil { - dataFileJobs, err := p.BuildDatafileRestoreJob(component, backupObj, backupTool) - if err != nil { - return nil, err - } - jobs = append(jobs, dataFileJobs...) + targetVolumes := backupMethod.TargetVolumes + if targetVolumes == nil { + return nil, nil } - - if backupTool.Spec.Logical.GetLogicalRestoreCommand() != nil { - logicalJobEnvs := p.buildCommonEnvs(backupObj) - logicJobs, err := p.buildLogicRestoreJob(component, backupObj, &backupTool.Spec, logicalJobEnvs...) 
- if err != nil { - return nil, err + getClusterJSON := func() string { + clusterSpec := r.Cluster.DeepCopy() + clusterSpec.ObjectMeta = metav1.ObjectMeta{ + Name: clusterSpec.GetName(), + UID: clusterSpec.GetUID(), + } + clusterSpec.Status = appsv1alpha1.ClusterStatus{} + b, _ := json.Marshal(*clusterSpec) + return string(b) + } + + var templates []dpv1alpha1.RestoreVolumeClaim + pvcLabels := factory.BuildCommonLabels(r.Cluster, comp) + for _, v := range comp.VolumeClaimTemplates { + if !r.existVolumeSource(targetVolumes, v.Name) { + continue + } + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", v.Name, r.Cluster.Name, comp.Name), + Labels: pvcLabels, + Annotations: map[string]string{ + // satisfy the detection of transformer_halt_recovering. + constant.LastAppliedClusterAnnotationKey: getClusterJSON(), + }, + }, } - jobs = append(jobs, logicJobs...) - } - // do create datafile restore job and check completed - if err = p.createJobsAndWaiting(jobs); err != nil { - return nil, err + // build pvc labels + factory.BuildPersistentVolumeClaimLabels(comp, pvc, v.Name) + claimTemplate := dpv1alpha1.RestoreVolumeClaim{ + ObjectMeta: pvc.ObjectMeta, + VolumeClaimSpec: v.Spec, + VolumeConfig: dpv1alpha1.VolumeConfig{ + VolumeSource: v.Name, + }, + } + templates = append(templates, claimTemplate) } - return jobs, nil -} - -func (p *RestoreManager) doLogfileBackupRestore(component *component.SynthesizedComponent, - baseBackup *dpv1alpha1.Backup) ([]client.Object, error) { - sourceClusterUID := baseBackup.Labels[constant.DataProtectionLabelClusterUIDKey] - logfileBackup, err := p.getLogfileBackup(component.Name, sourceClusterUID) - if err != nil { - return nil, err + if len(templates) == 0 { + return nil, nil } - - recoveryInfo, err := p.getRecoveryInfo(baseBackup, logfileBackup) + schedulingSpec, err := r.buildSchedulingSpec(comp) if err != nil { return nil, err } - var continuousJobs []client.Object - if 
len(recoveryInfo.Physical.GetPhysicalRestoreCommand()) != 0 { - prepareDataJobs, err := p.buildPITRPhysicalRestoreJob(component, recoveryInfo, logfileBackup) - if err != nil { - return nil, err - } - continuousJobs = append(continuousJobs, prepareDataJobs...) + restore := &dpv1alpha1.Restore{ + ObjectMeta: r.GetRestoreObjectMeta(comp, dpv1alpha1.PrepareData), + Spec: dpv1alpha1.RestoreSpec{ + Backup: dpv1alpha1.BackupConfig{ + Name: backupObj.Name, + Namespace: r.namespace, + }, + RestoreTime: r.restoreTime, + PrepareDataConfig: &dpv1alpha1.PrepareDataConfig{ + SchedulingSpec: schedulingSpec, + VolumeClaimManagementPolicy: r.volumeManagementPolicy, + RestoreVolumeClaimsTemplate: &dpv1alpha1.RestoreVolumeClaimsTemplate{ + Replicas: r.replicas, + StartingIndex: r.startingIndex, + Templates: templates, + }, + }, + }, } + return restore, nil +} - if len(recoveryInfo.Logical.GetLogicalRestoreCommand()) != 0 { - postReadyJobs, err := p.buildLogicRestoreJob(component, logfileBackup, recoveryInfo, recoveryInfo.Env...) - if err != nil { - return nil, err - } - continuousJobs = append(continuousJobs, postReadyJobs...) 
+func (r *RestoreManager) DoPostReady(comp *component.SynthesizedComponent, backupObj *dpv1alpha1.Backup) error { + compStatus := r.Cluster.Status.Components[comp.Name] + if compStatus.Phase != appsv1alpha1.RunningClusterCompPhase { + return nil } - - // do create PITR job and check completed - if err = p.createJobsAndWaiting(continuousJobs); err != nil { - return nil, err + jobActionLabels := factory.BuildCommonLabels(r.Cluster, comp) + if comp.WorkloadType == appsv1alpha1.Consensus || comp.WorkloadType == appsv1alpha1.Replication { + // TODO: use rsm constant + rsmAccessModeLabelKey := "rsm.workloads.kubeblocks.io/access-mode" + jobActionLabels[rsmAccessModeLabelKey] = string(appsv1alpha1.ReadWrite) + } + // TODO: get connect credential from backupPolicyTemplate + restore := &dpv1alpha1.Restore{ + ObjectMeta: r.GetRestoreObjectMeta(comp, dpv1alpha1.PostReady), + Spec: dpv1alpha1.RestoreSpec{ + Backup: dpv1alpha1.BackupConfig{ + Name: backupObj.Name, + Namespace: r.namespace, + }, + RestoreTime: r.restoreTime, + ReadyConfig: &dpv1alpha1.ReadyConfig{ + ExecAction: &dpv1alpha1.ExecAction{ + Target: dpv1alpha1.ExecActionTarget{ + PodSelector: metav1.LabelSelector{ + MatchLabels: factory.BuildCommonLabels(r.Cluster, comp), + }, + }, + }, + JobAction: &dpv1alpha1.JobAction{ + Target: dpv1alpha1.JobActionTarget{ + PodSelector: metav1.LabelSelector{ + MatchLabels: jobActionLabels, + }, + }, + }, + }, + }, } - return continuousJobs, nil + return r.createRestoreAndWait(restore) } -func (p *RestoreManager) listCompletedBackups(componentName string) (backupItems []dpv1alpha1.Backup, err error) { - backups := dpv1alpha1.BackupList{} - if err := p.Client.List(p.Ctx, &backups, - client.InNamespace(p.namespace), - client.MatchingLabels(map[string]string{ - constant.AppInstanceLabelKey: p.sourceCluster, - constant.KBAppComponentLabelKey: componentName, - }), - ); err != nil { - return nil, err +func (r *RestoreManager) buildSchedulingSpec(comp *component.SynthesizedComponent) 
(dpv1alpha1.SchedulingSpec, error) { + var err error + schedulingSpec := dpv1alpha1.SchedulingSpec{} + compSpec := r.Cluster.Spec.GetComponentByName(comp.Name) + affinity := component.BuildAffinity(r.Cluster, compSpec) + if schedulingSpec.Affinity, err = component.BuildPodAffinity(r.Cluster, affinity, comp); err != nil { + return schedulingSpec, err } - - backupItems = []dpv1alpha1.Backup{} - for _, b := range backups.Items { - if b.Status.Phase == dpv1alpha1.BackupCompleted && b.Status.Manifests != nil && b.Status.Manifests.BackupLog != nil { - backupItems = append(backupItems, b) - } + schedulingSpec.TopologySpreadConstraints = component.BuildPodTopologySpreadConstraints(r.Cluster, affinity, comp) + if schedulingSpec.Tolerations, err = component.BuildTolerations(r.Cluster, compSpec); err != nil { + return schedulingSpec, err } - return backupItems, nil + return schedulingSpec, nil } -// sortBackups sorts by StopTime -func (p *RestoreManager) sortBackups(backups []dpv1alpha1.Backup, reverse bool) []dpv1alpha1.Backup { - sort.Slice(backups, func(i, j int) bool { - if reverse { - i, j = j, i - } - if backups[i].Status.Manifests.BackupLog.StopTime == nil && backups[j].Status.Manifests.BackupLog.StopTime != nil { - return false - } - if backups[i].Status.Manifests.BackupLog.StopTime != nil && backups[j].Status.Manifests.BackupLog.StopTime == nil { - return true - } - if backups[i].Status.Manifests.BackupLog.StopTime.Equal(backups[j].Status.Manifests.BackupLog.StopTime) { - return backups[i].Name < backups[j].Name - } - return backups[i].Status.Manifests.BackupLog.StopTime.Before(backups[j].Status.Manifests.BackupLog.StopTime) - }) - return backups -} - -// getLatestBaseBackup gets the latest baseBackup -func (p *RestoreManager) getLatestBaseBackup(componentName string) (*dpv1alpha1.Backup, error) { - // 1. 
sorts reverse backups - backups, err := p.listCompletedBackups(componentName) - if err != nil { - return nil, err +func (r *RestoreManager) GetRestoreObjectMeta(comp *component.SynthesizedComponent, stage dpv1alpha1.RestoreStage) metav1.ObjectMeta { + name := fmt.Sprintf("%s-%s-%s-%s", r.Cluster.Name, comp.Name, r.Cluster.UID[:8], strings.ToLower(string(stage))) + if r.startingIndex != 0 { + name = fmt.Sprintf("%s-%d", name, r.startingIndex) } - backups = p.sortBackups(backups, true) - - // 2. gets the latest backup object - var latestBackup *dpv1alpha1.Backup - for _, item := range backups { - if item.Spec.BackupType != dpv1alpha1.BackupTypeLogFile && - item.Status.Manifests.BackupLog.StopTime != nil && !p.restoreTime.Before(item.Status.Manifests.BackupLog.StopTime) { - latestBackup = &item - break - } + if len(r.restoreLabels) == 0 { + r.restoreLabels = factory.BuildCommonLabels(r.Cluster, comp) } - if latestBackup == nil { - return nil, errors.New("can not found latest base backup") + return metav1.ObjectMeta{ + Name: name, + Namespace: r.Cluster.Namespace, + Labels: r.restoreLabels, } - - return latestBackup, nil } -// checkPITRAndInit checks if cluster need to be restored -func (p *RestoreManager) checkPITRAndInit(compName string) (bool, error) { - // checks args if pitr supported - cluster := p.Cluster - restoreTimeStr, err := p.getComponentBackupInfoFromAnnotation(compName, constant.RestoreFromTimeAnnotationKey) - if err != nil || restoreTimeStr == nil { - return false, err - } - sourceCuster := cluster.Annotations[constant.RestoreFromSrcClusterAnnotationKey] - if sourceCuster == "" { - return false, errors.New("need specify a source cluster name to recovery") - } - restoreTime := &metav1.Time{} - if err = restoreTime.UnmarshalQueryParameter(*restoreTimeStr); err != nil { - return false, err - } - vctCount := 0 - for _, item := range cluster.Spec.ComponentSpecs { - vctCount += len(item.VolumeClaimTemplates) +// existVolumeSource checks if the 
backup.status.backupMethod.targetVolumes exists the target volume which should be restored. +func (r *RestoreManager) existVolumeSource(targetVolumes *dpv1alpha1.TargetVolumeInfo, volumeName string) bool { + for _, v := range targetVolumes.Volumes { + if v == volumeName { + return true + } } - if vctCount == 0 { - return false, errors.New("not support pitr without any volume claim templates") + for _, v := range targetVolumes.VolumeMounts { + if v.Name == volumeName { + return true + } } - - // init args - p.restoreTime = restoreTime - p.sourceCluster = sourceCuster - p.namespace = cluster.Namespace - return true, nil + return false } -func (p *RestoreManager) getComponentBackupInfoFromAnnotation(compName, annotationKey string) (*string, error) { - valueString := p.Cluster.Annotations[annotationKey] +func (r *RestoreManager) initFromAnnotation(synthesizedComponent *component.SynthesizedComponent) (*dpv1alpha1.Backup, error) { + valueString := r.Cluster.Annotations[constant.RestoreFromBackupAnnotationKey] if len(valueString) == 0 { return nil, nil } - backupMap := map[string]string{} + backupMap := map[string]map[string]string{} err := json.Unmarshal([]byte(valueString), &backupMap) if err != nil { return nil, err } - targetValue, ok := backupMap[compName] + backupSource, ok := backupMap[synthesizedComponent.Name] if !ok { return nil, nil } - return &targetValue, nil -} - -func getVolumeMount(spec *dpv1alpha1.BackupToolSpec) string { - dataVolumeMount := "/data" - // TODO: hack it because the mount path is not explicitly specified in cluster definition - for _, env := range spec.Env { - if env.Name == constant.DPVolumeDataDIR { - dataVolumeMount = env.Value - break - } - } - return dataVolumeMount -} - -// getRecoveryInfo gets the pitr recovery info from baseBackup and logfileBackup -func (p *RestoreManager) getRecoveryInfo(baseBackup, logfileBackup *dpv1alpha1.Backup) (*dpv1alpha1.BackupToolSpec, error) { - // gets scripts from backup template - backupTool := 
dpv1alpha1.BackupTool{} - if err := p.Client.Get(p.Ctx, types.NamespacedName{ - Name: logfileBackup.Status.BackupToolName, - }, &backupTool); err != nil { - return nil, err + if namespace := backupSource[constant.BackupNamespaceKeyForRestore]; namespace != "" { + r.namespace = namespace } - // build recovery env - headEnv := p.buildCommonEnvs(logfileBackup) - // build env of recovery time - spec := &backupTool.Spec - timeFormat := p.getTimeFormat(spec.Env) - headEnv = append(headEnv, corev1.EnvVar{Name: constant.DPKBRecoveryTime, Value: p.restoreTime.UTC().Format(timeFormat)}) - headEnv = append(headEnv, corev1.EnvVar{Name: constant.DPKBRecoveryTimestamp, Value: strconv.FormatInt(p.restoreTime.Unix(), 10)}) - // build env of backup startTime and user contexts - if baseBackup.Status.Manifests != nil { - // inject env for backup startTime - backupLog := baseBackup.Status.Manifests.BackupLog - startTime := baseBackup.Status.StartTimestamp - if backupLog != nil && backupLog.StartTime != nil { - startTime = backupLog.StartTime - } - if startTime != nil { - startTimeEnv := corev1.EnvVar{Name: constant.DPBaseBackupStartTime, Value: startTime.UTC().Format(timeFormat)} - startTimeTimestampEnv := corev1.EnvVar{Name: constant.DPBaseBackupStartTimestamp, Value: strconv.FormatInt(startTime.Unix(), 10)} - headEnv = append(headEnv, startTimeEnv, startTimeTimestampEnv) - } - // inject env for user contexts - backupUserContext := baseBackup.Status.Manifests.UserContext - for k, v := range backupUserContext { - headEnv = append(headEnv, corev1.EnvVar{Name: strings.ToUpper(k), Value: v}) - } - } - spec.Env = append(headEnv, spec.Env...) 
- return spec, nil -} - -func (p *RestoreManager) getLogfileBackup(componentName string, sourceClusterUID string) (*dpv1alpha1.Backup, error) { - logfileBackupList := dpv1alpha1.BackupList{} - if err := p.Client.List(p.Ctx, &logfileBackupList, - client.MatchingLabels{ - constant.AppInstanceLabelKey: p.sourceCluster, - constant.KBAppComponentLabelKey: componentName, - constant.BackupTypeLabelKeyKey: string(dpv1alpha1.BackupTypeLogFile), - }); err != nil { - return nil, err - } - if len(logfileBackupList.Items) == 0 { - return nil, errors.New("not found logfile backups") - } - backups := p.sortBackups(logfileBackupList.Items, true) - for _, v := range backups { - // filter backups with cluster uid for excluding same cluster name - if v.Labels[constant.DataProtectionLabelClusterUIDKey] == sourceClusterUID { - return &v, nil - } - } - // TODO: return an error if logfile backup is not found after v0.7.0, return the first logfile for compatibility with version v0.5.0. - return &logfileBackupList.Items[0], nil -} - -func (p *RestoreManager) getLogfilePVC(logfileBackup *dpv1alpha1.Backup) (*corev1.PersistentVolumeClaim, error) { - pvcKey := types.NamespacedName{ - Name: logfileBackup.Status.PersistentVolumeClaimName, - Namespace: logfileBackup.Namespace, - } - pvc := corev1.PersistentVolumeClaim{} - if err := p.Client.Get(p.Ctx, pvcKey, &pvc); err != nil { - return nil, err - } - return &pvc, nil -} - -func (p *RestoreManager) getDataPVCs(componentName string) ([]corev1.PersistentVolumeClaim, error) { - dataPVCList := corev1.PersistentVolumeClaimList{} - pvcLabels := map[string]string{ - constant.AppInstanceLabelKey: p.Cluster.Name, - constant.KBAppComponentLabelKey: componentName, - constant.VolumeTypeLabelKey: string(appsv1alpha1.VolumeTypeData), - } - if err := p.Client.List(p.Ctx, &dataPVCList, - client.InNamespace(p.namespace), - client.MatchingLabels(pvcLabels)); err != nil { - return nil, err - } - return dataPVCList.Items, nil -} - -// When the pvc has been bound 
on the determined pod, -// this is a little different from the getDataPVCs function, -// we need to get the node name of the pvc according to the pod, -// and the job must be the same as the node name of the pvc -func (p *RestoreManager) getDataPVCsAndPods(componentName string, podRestoreScope dpv1alpha1.PodRestoreScope) (map[string]corev1.Pod, error) { - podList := corev1.PodList{} - podLabels := map[string]string{ - constant.AppInstanceLabelKey: p.Cluster.Name, - constant.KBAppComponentLabelKey: componentName, - } - if err := p.Client.List(p.Ctx, &podList, - client.InNamespace(p.namespace), - client.MatchingLabels(podLabels)); err != nil { - return nil, err - } - dataPVCsAndPodsMap := map[string]corev1.Pod{} - for _, targetPod := range podList.Items { - for _, volume := range targetPod.Spec.Volumes { - if volume.PersistentVolumeClaim == nil { - continue - } - dataPVC := corev1.PersistentVolumeClaim{} - pvcKey := types.NamespacedName{Namespace: targetPod.Namespace, Name: volume.PersistentVolumeClaim.ClaimName} - if err := p.Client.Get(p.Ctx, pvcKey, &dataPVC); err != nil { - return nil, err - } - if dataPVC.Labels[constant.VolumeTypeLabelKey] != string(appsv1alpha1.VolumeTypeData) { - continue - } - if podRestoreScope == dpv1alpha1.PodRestoreScopeAll { - dataPVCsAndPodsMap[dataPVC.Name] = targetPod - continue - } - if podRestoreScope == dpv1alpha1.PodRestoreScopeReadWrite { - if targetPod.Labels[constant.ConsensusSetAccessModeLabelKey] == string(appsv1alpha1.ReadWrite) || - targetPod.Labels[constant.RoleLabelKey] == constant.Primary { - dataPVCsAndPodsMap[dataPVC.Name] = targetPod - break - } - } - } - } - return dataPVCsAndPodsMap, nil -} - -func (p *RestoreManager) getDataVCT(synthesizedComponent *component.SynthesizedComponent) corev1.PersistentVolumeClaimTemplate { - vctMap := map[string]corev1.PersistentVolumeClaimTemplate{} - for _, vct := range synthesizedComponent.VolumeClaimTemplates { - vctMap[vct.Name] = vct - } - for _, vt := range 
synthesizedComponent.VolumeTypes { - if vt.Type == appsv1alpha1.VolumeTypeData { - return vctMap[vt.Name] - - } - } - if len(synthesizedComponent.VolumeClaimTemplates) != 0 { - return synthesizedComponent.VolumeClaimTemplates[0] - } - return corev1.PersistentVolumeClaimTemplate{} -} - -func (p *RestoreManager) createDataPVCs(synthesizedComponent *component.SynthesizedComponent, backup *dpv1alpha1.Backup) error { - // determines the data volume type - vct := p.getDataVCT(synthesizedComponent) - if vct.Name == "" { - return intctrlutil.NewNotFound("can not found any PersistentVolumeClaim of data type") - } - - snapshotName := "" - if backup != nil && backup.Spec.BackupType == dpv1alpha1.BackupTypeSnapshot { - snapshotName = backup.Name - } - for i := int32(0); i < synthesizedComponent.Replicas; i++ { - pvcName := fmt.Sprintf("%s-%s-%s-%d", vct.Name, p.Cluster.Name, synthesizedComponent.Name, i) - pvcKey := types.NamespacedName{Namespace: p.Cluster.Namespace, Name: pvcName} - pvc := factory.BuildPVC(p.Cluster, synthesizedComponent, &vct, pvcKey, snapshotName) - // Prevents halt recovery from checking uncleaned resources - if pvc.Annotations == nil { - pvc.Annotations = map[string]string{} - } - pvc.Annotations[constant.LastAppliedClusterAnnotationKey] = - fmt.Sprintf(`{"metadata":{"uid":"%s","name":"%s"}}`, p.Cluster.UID, p.Cluster.Name) - - if err := p.Client.Create(p.Ctx, pvc); err != nil && !apierrors.IsAlreadyExists(err) { - return err - } - } - return nil -} - -func (p *RestoreManager) getBackupObjectFromAnnotation(synthesizedComponent *component.SynthesizedComponent) (*dpv1alpha1.Backup, error) { - backupSourceName, err := p.getComponentBackupInfoFromAnnotation(synthesizedComponent.Name, constant.RestoreFromBackUpAnnotationKey) - if backupSourceName == nil || err != nil { - return nil, err + if managementPolicy := backupSource[constant.VolumeManagementPolicyKeyForRestore]; managementPolicy != "" { + r.volumeManagementPolicy = 
dpv1alpha1.VolumeClaimManagementPolicy(managementPolicy) } + r.restoreTime = backupSource[constant.RestoreTimeKeyForRestore] backup := &dpv1alpha1.Backup{} - if err = p.Client.Get(p.Ctx, types.NamespacedName{Name: *backupSourceName, Namespace: p.Cluster.Namespace}, backup); err != nil { + if err = r.Client.Get(r.Ctx, types.NamespacedName{Name: backupSource[constant.BackupNameKeyForRestore], Namespace: r.Cluster.Namespace}, backup); err != nil { return nil, err } return backup, nil } -func (p *RestoreManager) BuildDatafileRestoreJob(synthesizedComponent *component.SynthesizedComponent, backup *dpv1alpha1.Backup, backupTool *dpv1alpha1.BackupTool) (objs []client.Object, err error) { - pvcNames := make([]string, 0) - vct := p.getDataVCT(synthesizedComponent) - for i := int32(0); i < synthesizedComponent.Replicas; i++ { - pvcNames = append(pvcNames, fmt.Sprintf("%s-%s-%s-%d", vct.Name, p.Cluster.Name, synthesizedComponent.Name, i)) - } - return p.BuildDatafileRestoreJobByPVCS(synthesizedComponent, backup, backupTool, pvcNames, p.buildCommonLabels(synthesizedComponent)) -} - -func (p *RestoreManager) BuildDatafileRestoreJobByPVCS(synthesizedComponent *component.SynthesizedComponent, - backup *dpv1alpha1.Backup, - backupTool *dpv1alpha1.BackupTool, - pvcNames []string, - labels map[string]string) (objs []client.Object, err error) { - - // build volumes and volumeMounts of backup - buildBackupVolumesAndMounts := func() ([]corev1.Volume, []corev1.VolumeMount, string, string) { - backupPVCName := backup.Status.PersistentVolumeClaimName - // builds datafile volumes and volumeMounts - backupVolumeName := fmt.Sprintf("%s-%s", synthesizedComponent.Name, backupPVCName) - backupVolumes := []corev1.Volume{ - { - Name: backupVolumeName, - VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: backupPVCName}}, - }, - } - backupMountPath := "/" + backup.Name - backupVolumeMounts := []corev1.VolumeMount{{Name: backupVolumeName, 
MountPath: backupMountPath}} - - // build logfile volumes and volumeMounts - logFilePVCName := backup.Status.LogFilePersistentVolumeClaimName - if !backupTool.Spec.Physical.IsRelyOnLogfile() || logFilePVCName == backupPVCName { - return backupVolumes, backupVolumeMounts, backupMountPath, backupMountPath - } - logFileVolumeName := fmt.Sprintf("%s-%s", synthesizedComponent.Name, logFilePVCName) - backupVolumes = append(backupVolumes, corev1.Volume{ - Name: logFileVolumeName, - VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: logFilePVCName}}, - }) - logfileMountPath := "/" + backup.Name + "-logfile" - backupVolumeMounts = append(backupVolumeMounts, corev1.VolumeMount{Name: logFileVolumeName, MountPath: logfileMountPath}) - return backupVolumes, backupVolumeMounts, backupMountPath, logfileMountPath - } - - backupVolumes, backupVolumeMounts, backupMountPath, logfileMountPath := buildBackupVolumesAndMounts() - backupVolumeMounts = append(backupVolumeMounts, synthesizedComponent.PodSpec.Containers[0].VolumeMounts...) 
- volumeMountMap := map[string]corev1.VolumeMount{} - for _, mount := range backupVolumeMounts { - volumeMountMap[mount.Name] = mount - } - - // builds env - buildEnv := func() []corev1.EnvVar { - env := []corev1.EnvVar{{Name: constant.DPBackupName, Value: backup.Name}} - manifests := backup.Status.Manifests - if manifests != nil && manifests.BackupTool != nil { - env = append(env, corev1.EnvVar{Name: constant.DPBackupDIR, Value: fmt.Sprintf("%s%s", backupMountPath, manifests.BackupTool.FilePath)}) - if manifests.BackupTool.LogFilePath != "" { - env = append(env, corev1.EnvVar{Name: constant.DPLogFileDIR, Value: fmt.Sprintf("%s%s", logfileMountPath, manifests.BackupTool.LogFilePath)}) - } - } - timeFormat := p.getTimeFormat(backupTool.Spec.Env) - stopTime := backup.Status.GetStopTime() - if stopTime != nil { - env = append(env, corev1.EnvVar{Name: constant.DPBackupStopTime, Value: stopTime.Format(timeFormat)}) - } - // merges env from backup tool. - env = append(env, backupTool.Spec.Env...) - return env - } - env := buildEnv() - - objs = make([]client.Object, 0) - vct := p.getDataVCT(synthesizedComponent) - for _, pvcName := range pvcNames { - dataVolume := corev1.Volume{ - Name: vct.Name, - VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName}}, - } - volumes := []corev1.Volume{dataVolume} - volumes = append(volumes, backupVolumes...) - volumes = append(volumes, synthesizedComponent.PodSpec.Volumes...) 
- volumeMounts := make([]corev1.VolumeMount, 0) - for _, volume := range volumes { - if vmount, ok := volumeMountMap[volume.Name]; ok { - volumeMounts = append(volumeMounts, vmount) - } - } - jobName := p.GetDatafileRestoreJobName(pvcName) - job, err := factory.BuildRestoreJob(p.Cluster, synthesizedComponent, jobName, backupTool.Spec.Image, - backupTool.Spec.Physical.GetPhysicalRestoreCommand(), volumes, volumeMounts, env, backupTool.Spec.Resources) - if err != nil { - return nil, err - } - // if the workload uses local pv, the job's affinity should consistent with workload. - // so datafile job should contain cluster affinity constraints. - affinity := component.BuildAffinity(p.Cluster, p.Cluster.Spec.GetComponentByName(synthesizedComponent.Name)) - if job.Spec.Template.Spec.Affinity, err = component.BuildPodAffinity(p.Cluster, affinity, synthesizedComponent); err != nil { - return nil, err - } - if p.Scheme != nil { - if err = controllerutil.SetControllerReference(p.Cluster, job, p.Scheme); err != nil { - return nil, err - } - } - job.SetLabels(labels) - objs = append(objs, job) - } - return objs, nil -} - -func (p *RestoreManager) buildPITRPhysicalRestoreJob(synthesizedComponent *component.SynthesizedComponent, - recoveryInfo *dpv1alpha1.BackupToolSpec, - logfileBackup *dpv1alpha1.Backup) (objs []client.Object, err error) { - // gets data dir pvc name - dataPVCs, err := p.getDataPVCs(synthesizedComponent.Name) - if err != nil { - return objs, err - } - if len(dataPVCs) == 0 { - return objs, errors.New("not found data pvc") - } - // renders the pitrJob cue template - image := recoveryInfo.Image - if image == "" { - image = synthesizedComponent.PodSpec.Containers[0].Image - } - logfilePVC, err := p.getLogfilePVC(logfileBackup) - if err != nil { - return objs, err - } - dataVolumeMount := getVolumeMount(recoveryInfo) - volumeMounts := []corev1.VolumeMount{ - {Name: "data", MountPath: dataVolumeMount}, - {Name: "log", MountPath: backupVolumePATH}, - } - // creates 
physical restore job - for _, dataPVC := range dataPVCs { - volumes := []corev1.Volume{ - {Name: "data", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: dataPVC.GetName()}}}, - {Name: "log", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: logfilePVC.GetName()}}}, - } - pitrJobName := p.buildRestoreJobName(fmt.Sprintf("pitr-phy-%s", dataPVC.GetName())) - pitrJob, err := factory.BuildRestoreJob(p.Cluster, synthesizedComponent, pitrJobName, image, - recoveryInfo.Physical.GetPhysicalRestoreCommand(), volumes, volumeMounts, recoveryInfo.Env, recoveryInfo.Resources) - if err != nil { - return objs, err - } - if p.Scheme != nil { - if err = controllerutil.SetControllerReference(p.Cluster, pitrJob, p.Scheme); err != nil { - return nil, err - } - } - pitrJob.SetLabels(p.buildCommonLabels(synthesizedComponent)) - // collect pvcs and jobs for later deletion - objs = append(objs, pitrJob) - } - - return objs, nil -} - -func (p *RestoreManager) buildLogicRestoreJob(synthesizedComponent *component.SynthesizedComponent, - backup *dpv1alpha1.Backup, - backupToolSpec *dpv1alpha1.BackupToolSpec, - envs ...corev1.EnvVar) (objs []client.Object, err error) { - // creates logic restore job, usually imported after the cluster service is started - if p.Cluster.Status.Phase != appsv1alpha1.RunningClusterPhase { - return nil, nil - } - image := backupToolSpec.Image - if image == "" { - image = synthesizedComponent.PodSpec.Containers[0].Image - } - dataVolumeMount := getVolumeMount(backupToolSpec) - volumeMounts := []corev1.VolumeMount{ - {Name: "data", MountPath: dataVolumeMount}, - {Name: "backup-data", MountPath: backupVolumePATH}, - } - pvcsAndPodsMap, err := p.getDataPVCsAndPods(synthesizedComponent.Name, backupToolSpec.Logical.PodScope) - if err != nil { - return objs, err - } - jobEnv := backupToolSpec.Env - jobEnv = append(jobEnv, envs...) 
- for pvcName, pod := range pvcsAndPodsMap { - podENV := pod.Spec.Containers[0].Env - podENV = append(podENV, corev1.EnvVar{Name: constant.DPDBHost, Value: intctrlutil.BuildPodHostDNS(&pod)}) - podENV = append(podENV, jobEnv...) - volumes := []corev1.Volume{ - {Name: "data", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName}}}, - {Name: "backup-data", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: backup.Status.PersistentVolumeClaimName}}}, - } - logicJobName := p.buildRestoreJobName(fmt.Sprintf("restore-%s-logic-%s", backup.Spec.BackupType, pod.Name)) - logicJob, err := factory.BuildRestoreJob(p.Cluster, synthesizedComponent, logicJobName, image, - backupToolSpec.Logical.GetLogicalRestoreCommand(), volumes, volumeMounts, podENV, backupToolSpec.Resources) - if err != nil { - return objs, err - } - if p.Scheme != nil { - if err = controllerutil.SetControllerReference(p.Cluster, logicJob, p.Scheme); err != nil { - return nil, err - } - } - logicJob.SetLabels(p.buildCommonLabels(synthesizedComponent)) - // DO NOT use "volume.kubernetes.io/selected-node" annotation key in PVC, because it is unreliable. - logicJob.Spec.Template.Spec.NodeName = pod.Spec.NodeName - objs = append(objs, logicJob) +// createRestoreAndWait create the restore CR and wait for completion. +func (r *RestoreManager) createRestoreAndWait(restore *dpv1alpha1.Restore) error { + if restore == nil { + return nil } - - return objs, nil -} - -func (p *RestoreManager) checkJobDone(key client.ObjectKey) (bool, error) { - result := &batchv1.Job{} - if err := p.Client.Get(p.Ctx, key, result); err != nil { - if apierrors.IsNotFound(err) { - return false, nil - } - // if err is NOT "not found", that means unknown error. 
- return false, err - } - if result.Status.Conditions != nil && len(result.Status.Conditions) > 0 { - jobStatusCondition := result.Status.Conditions[0] - if jobStatusCondition.Type == batchv1.JobComplete { - return true, nil - } else if jobStatusCondition.Type == batchv1.JobFailed { - return true, errors.New(jobStatusCondition.Reason) - } + if r.Scheme != nil { + _ = controllerutil.SetControllerReference(r.Cluster, restore, r.Scheme) } - // if found, return true - return false, nil -} - -func (p *RestoreManager) createJobsAndWaiting(objs []client.Object) error { - // creates and checks into different loops to support concurrent resource creation. - for _, job := range objs { - fetchedJob := &batchv1.Job{} - if err := p.Client.Get(p.Ctx, client.ObjectKeyFromObject(job), fetchedJob); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - if err = p.Client.Create(p.Ctx, job); err != nil && !apierrors.IsAlreadyExists(err) { - return err - } + if err := r.Client.Get(r.Ctx, client.ObjectKeyFromObject(restore), restore); err != nil { + if !apierrors.IsNotFound(err) { + return err } - } - for _, job := range objs { - if done, err := p.checkJobDone(client.ObjectKeyFromObject(job)); err != nil { + if err = r.Client.Create(r.Ctx, restore); err != nil && !apierrors.IsAlreadyExists(err) { return err - } else if !done { - return intctrlutil.NewErrorf(intctrlutil.ErrorTypeNeedWaiting, "waiting restore job %s", job.GetName()) } } - return nil -} - -func (p *RestoreManager) cleanupJobs(objs []client.Object) error { - if p.Cluster.Status.Phase == appsv1alpha1.RunningClusterPhase { - for _, obj := range objs { - if err := intctrlutil.BackgroundDeleteObject(p.Client, p.Ctx, obj); err != nil { - return err - } - } + if restore.Status.Phase == dpv1alpha1.RestorePhaseCompleted { + return nil } - return nil + if restore.Status.Phase == dpv1alpha1.RestorePhaseFailed { + return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRestoreFailed, `restore "%s" is Failed, you can describe it 
and re-restore the cluster.`, restore.GetName()) + } + return intctrlutil.NewErrorf(intctrlutil.ErrorTypeNeedWaiting, `waiting for restore "%s" successfully`, restore.GetName()) } -func (p *RestoreManager) cleanupClusterAnnotations() error { - if p.Cluster.Status.Phase == appsv1alpha1.RunningClusterPhase && p.Cluster.Annotations != nil { - cluster := p.Cluster +func (r *RestoreManager) cleanupClusterAnnotations() error { + if r.Cluster.Status.Phase == appsv1alpha1.RunningClusterPhase && r.Cluster.Annotations != nil { + cluster := r.Cluster patch := client.MergeFrom(cluster.DeepCopy()) - delete(cluster.Annotations, constant.RestoreFromSrcClusterAnnotationKey) - delete(cluster.Annotations, constant.RestoreFromTimeAnnotationKey) - delete(cluster.Annotations, constant.RestoreFromBackUpAnnotationKey) - return p.Client.Patch(p.Ctx, cluster, patch) + delete(cluster.Annotations, constant.RestoreFromBackupAnnotationKey) + return r.Client.Patch(r.Ctx, cluster, patch) } return nil } -// buildRestoreJobName builds the restore job name. 
-func (p *RestoreManager) buildRestoreJobName(jobName string) string { - l := len(jobName) - if l > 63 { - return fmt.Sprintf("%s-%s", jobName[:57], jobName[l-5:l]) - } - return jobName -} - -func (p *RestoreManager) GetDatafileRestoreJobName(pvcName string) string { - return p.buildRestoreJobName(fmt.Sprintf("base-%s", pvcName)) -} - -func (p *RestoreManager) getBackupTool(backupToolName string) (*dpv1alpha1.BackupTool, error) { - backupToolKey := client.ObjectKey{Name: backupToolName} - backupTool := &dpv1alpha1.BackupTool{} - if err := p.Client.Get(p.Ctx, backupToolKey, backupTool); err != nil { - return nil, err - } - return backupTool, nil -} - -func (p *RestoreManager) buildCommonLabels(synthesizedComponent *component.SynthesizedComponent) map[string]string { - return map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppInstanceLabelKey: p.Cluster.Name, - constant.KBAppComponentLabelKey: synthesizedComponent.Name, - } -} - -func (p *RestoreManager) buildCommonEnvs(backup *dpv1alpha1.Backup) []corev1.EnvVar { - backupDIR := backup.Name - if backup.Status.Manifests != nil && backup.Status.Manifests.BackupTool != nil { - backupDIR = backup.Status.Manifests.BackupTool.FilePath +func (r *RestoreManager) cleanupRestores(comp *component.SynthesizedComponent) error { + if r.Cluster.Status.Phase != appsv1alpha1.RunningClusterPhase { + return nil } - return []corev1.EnvVar{ - {Name: constant.DPBackupDIR, Value: backupVolumePATH + backupDIR}, - {Name: constant.DPBackupName, Value: backup.Name}, + restoreList := &dpv1alpha1.RestoreList{} + if err := r.Client.List(r.Ctx, restoreList, client.MatchingLabels(factory.BuildCommonLabels(r.Cluster, comp))); err != nil { + return err } -} - -func (p *RestoreManager) getTimeFormat(envs []corev1.EnvVar) string { - for _, env := range envs { - if env.Name == constant.DPTimeFormat { - return env.Value + for i := range restoreList.Items { + if err := intctrlutil.BackgroundDeleteObject(r.Client, r.Ctx, 
&restoreList.Items[i]); err != nil { + return err } } - return time.RFC3339 + return nil } diff --git a/internal/controller/plan/restore_test.go b/internal/controller/plan/restore_test.go index f02452d8b8d..30d30ac22d9 100644 --- a/internal/controller/plan/restore_test.go +++ b/internal/controller/plan/restore_test.go @@ -25,7 +25,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - batchv1 "k8s.io/api/batch/v1" "k8s.io/client-go/kubernetes/scheme" corev1 "k8s.io/api/core/v1" @@ -40,6 +39,7 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" ) var _ = Describe("PITR Functions", func() { @@ -49,11 +49,10 @@ var _ = Describe("PITR Functions", func() { var ( randomStr = testCtx.GetRandomStr() - clusterName = "cluster-for-pitr-" + randomStr + clusterName = "cluster-" + randomStr now = metav1.Now() startTime = metav1.Time{Time: now.Add(-time.Hour * 2)} - stopTime = metav1.Time{Time: now.Add(time.Hour * 2)} ) cleanEnv := func() { @@ -79,8 +78,7 @@ var _ = Describe("PITR Functions", func() { testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml, &opts) testapps.ClearResources(&testCtx, generics.BackupSignature, inNS, ml) testapps.ClearResources(&testCtx, generics.BackupPolicySignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.JobSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.CronJobSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.RestoreSignature, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) // // non-namespaced @@ -104,21 +102,19 @@ var _ = Describe("PITR Functions", func() { ) var ( - clusterDef *appsv1alpha1.ClusterDefinition - clusterVersion *appsv1alpha1.ClusterVersion - cluster 
*appsv1alpha1.Cluster - synthesizedComponent *component.SynthesizedComponent - pvc *corev1.PersistentVolumeClaim - backup *dpv1alpha1.Backup - fullBackupTool *dpv1alpha1.BackupTool - fullBackupToolName string - continuousBackupTool *dpv1alpha1.BackupTool - continuousBackupToolName string + clusterDef *appsv1alpha1.ClusterDefinition + clusterVersion *appsv1alpha1.ClusterVersion + cluster *appsv1alpha1.Cluster + synthesizedComponent *component.SynthesizedComponent + pvc *corev1.PersistentVolumeClaim + backup *dpv1alpha1.Backup + fullBackupActionSet *dpv1alpha1.ActionSet + fullBackupActionSetName string ) BeforeEach(func() { clusterDef = testapps.NewClusterDefFactory(clusterDefName). - AddComponentDef(testapps.StatefulMySQLComponent, mysqlCompType). + AddComponentDef(testapps.ConsensusMySQLComponent, mysqlCompType). AddComponentDef(testapps.StatelessNginxComponent, nginxCompType). Create(&testCtx).GetObject() clusterVersion = testapps.NewClusterVersionFactory(clusterVersionName, clusterDefName). @@ -132,6 +128,7 @@ var _ = Describe("PITR Functions", func() { cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDef.Name, clusterVersion.Name). AddComponent(mysqlCompName, mysqlCompType). + SetReplicas(3). SetClusterAffinity(&appsv1alpha1.Affinity{ PodAntiAffinity: appsv1alpha1.Required, TopologyKeys: []string{topologyKey}, @@ -140,7 +137,6 @@ var _ = Describe("PITR Functions", func() { }, }). AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec). - AddRestorePointInTime(metav1.Time{Time: stopTime.Time}, mysqlCompName, sourceCluster). Create(&testCtx).GetObject() By("By mocking a pvc") @@ -155,33 +151,16 @@ var _ = Describe("PITR Functions", func() { _ = testapps.NewPodFactory(testCtx.DefaultNamespace, clusterName+"-"+mysqlCompName+"-0"). AddAppInstanceLabel(clusterName). AddAppComponentLabel(mysqlCompName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). AddVolume(volume). 
AddLabels(constant.ConsensusSetAccessModeLabelKey, string(appsv1alpha1.ReadWrite)). AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). AddNodeName("fake-node-name"). Create(&testCtx).GetObject() - By("create datafile backup tool") - fullBackupTool = testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) - fullBackupToolName = fullBackupTool.Name - - By("By creating backup tool: ") - backupSelfDefineObj := &dpv1alpha1.BackupTool{} - backupSelfDefineObj.SetLabels(map[string]string{ - constant.BackupToolTypeLabelKey: "pitr", - constant.ClusterDefLabelKey: clusterDefName, - }) - continuousBackupTool = testapps.CreateCustomizedObj(&testCtx, "backup/pitr_backuptool.yaml", - backupSelfDefineObj, testapps.RandomizedObjName()) - // set datafile backup relies on logfile - Expect(testapps.ChangeObj(&testCtx, continuousBackupTool, func(tmpObj *dpv1alpha1.BackupTool) { - tmpObj.Spec.Physical.RelyOnLogfile = true - })).Should(Succeed()) - continuousBackupToolName = continuousBackupTool.Name - - backupObj := dpv1alpha1.BackupToolList{} - Expect(testCtx.Cli.List(testCtx.Ctx, &backupObj)).Should(Succeed()) + By("create actionset of full backup") + fullBackupActionSet = testapps.CreateCustomizedObj(&testCtx, "backup/actionset.yaml", &dpv1alpha1.ActionSet{}, testapps.RandomizedObjName()) + fullBackupActionSetName = fullBackupActionSet.Name By("By creating backup policyTemplate: ") backupTplLabels := map[string]string{ @@ -191,17 +170,13 @@ var _ = Describe("PITR Functions", func() { WithRandomName().SetLabels(backupTplLabels). AddBackupPolicy(mysqlCompName). SetClusterDefRef(clusterDefName). - SetTTL(defaultTTL). - AddDatafilePolicy(). - SetBackupToolName(fullBackupToolName). - SetSchedule("0 * * * *", true). - AddIncrementalPolicy(). - SetBackupToolName(continuousBackupToolName). - SetSchedule("0 * * * *", true). 
- Create(&testCtx).GetObject() + SetRetentionPeriod(defaultTTL). + AddBackupMethod(testdp.BackupMethodName, false, fullBackupActionSetName). + SetBackupMethodVolumeMounts(testapps.DataVolumeName, "/data") clusterCompDefObj := clusterDef.Spec.ComponentDefs[0] synthesizedComponent = &component.SynthesizedComponent{ + WorkloadType: appsv1alpha1.Consensus, PodSpec: clusterCompDefObj.PodSpec, Probes: clusterCompDefObj.Probes, LogConfigs: clusterCompDefObj.LogConfigs, @@ -217,216 +192,79 @@ var _ = Describe("PITR Functions", func() { SetStorage("1Gi"). Create(&testCtx).GetObject() - logfileRemotePVC := testapps.NewPersistentVolumeClaimFactory( - testCtx.DefaultNamespace, "remote-pvc-logfile", clusterName, mysqlCompName, "log"). - SetStorage("1Gi"). - Create(&testCtx).GetObject() - By("By creating base backup: ") backupLabels := map[string]string{ - constant.AppInstanceLabelKey: sourceCluster, - constant.KBAppComponentLabelKey: mysqlCompName, - constant.BackupTypeLabelKeyKey: string(dpv1alpha1.BackupTypeDataFile), - constant.DataProtectionLabelClusterUIDKey: string(cluster.UID), + constant.AppInstanceLabelKey: sourceCluster, + constant.KBAppComponentLabelKey: mysqlCompName, } - backup = testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). + backup = testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName). WithRandomName().SetLabels(backupLabels). SetBackupPolicyName("test-fake"). - SetBackupType(dpv1alpha1.BackupTypeDataFile). + SetBackupMethod(testdp.VSBackupMethodName). 
Create(&testCtx).GetObject() baseStartTime := &startTime baseStopTime := &now - backupStatus := dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupCompleted, - StartTimestamp: baseStartTime, - CompletionTimestamp: baseStopTime, - BackupToolName: fullBackupToolName, - SourceCluster: clusterName, - PersistentVolumeClaimName: remotePVC.Name, - LogFilePersistentVolumeClaimName: logfileRemotePVC.Name, - Manifests: &dpv1alpha1.ManifestsStatus{ - BackupTool: &dpv1alpha1.BackupToolManifestsStatus{ - FilePath: fmt.Sprintf("/%s/%s", backup.Namespace, backup.Name), - LogFilePath: fmt.Sprintf("/%s/%s", backup.Namespace, backup.Name+"-logfile"), - }, - BackupLog: &dpv1alpha1.BackupLogStatus{ - StartTime: baseStartTime, - StopTime: baseStopTime, - }, - }, - } - patchBackupStatus(backupStatus, client.ObjectKeyFromObject(backup)) - - By("By creating continuous backup: ") - logfileBackupLabels := map[string]string{ - constant.AppInstanceLabelKey: sourceCluster, - constant.KBAppComponentLabelKey: mysqlCompName, - constant.BackupTypeLabelKeyKey: string(dpv1alpha1.BackupTypeLogFile), - constant.DataProtectionLabelClusterUIDKey: string(cluster.UID), - } - incrStartTime := &startTime - incrStopTime := &stopTime - logfileBackup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - WithRandomName().SetLabels(logfileBackupLabels). - SetBackupPolicyName("test-fake"). - SetBackupType(dpv1alpha1.BackupTypeLogFile). 
- Create(&testCtx).GetObject() - backupStatus = dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupCompleted, - StartTimestamp: incrStartTime, - CompletionTimestamp: incrStopTime, - SourceCluster: clusterName, - PersistentVolumeClaimName: logfileRemotePVC.Name, - BackupToolName: continuousBackupToolName, - Manifests: &dpv1alpha1.ManifestsStatus{ - BackupLog: &dpv1alpha1.BackupLogStatus{ - StartTime: incrStartTime, - StopTime: incrStopTime, - }, - }, + backup.Status = dpv1alpha1.BackupStatus{ + Phase: dpv1alpha1.BackupPhaseCompleted, + StartTimestamp: baseStartTime, + CompletionTimestamp: baseStopTime, + PersistentVolumeClaimName: remotePVC.Name, } - patchBackupStatus(backupStatus, client.ObjectKeyFromObject(logfileBackup)) + testdp.MockBackupStatusMethod(backup, testapps.DataVolumeName) + patchBackupStatus(backup.Status, client.ObjectKeyFromObject(backup)) }) It("Test restore", func() { - By("restore from snapshot backup") - backupSnapshot := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). - WithRandomName(). - SetBackupPolicyName("test-fake"). - SetBackupType(dpv1alpha1.BackupTypeSnapshot). 
- Create(&testCtx).GetObject() - restoreFromBackup := fmt.Sprintf(`{"%s":"%s"}`, mysqlCompName, backupSnapshot.Name) - cluster.Annotations[constant.RestoreFromBackUpAnnotationKey] = restoreFromBackup - Expect(DoRestore(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme)).Should(Succeed()) - - By("restore from datafile backup") - restoreFromBackup = fmt.Sprintf(`{"%s":"%s"}`, mysqlCompName, backup.Name) - cluster.Annotations[constant.RestoreFromBackUpAnnotationKey] = restoreFromBackup - err := DoRestore(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) + By("restore from backup") + restoreFromBackup := fmt.Sprintf(`{"%s": {"name":"%s"}}`, mysqlCompName, backup.Name) + Expect(testapps.ChangeObj(&testCtx, cluster, func(tmpCluster *appsv1alpha1.Cluster) { + tmpCluster.Annotations = map[string]string{ + constant.RestoreFromBackupAnnotationKey: restoreFromBackup, + } + })).Should(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)).Should(Succeed()) + restoreMGR := NewRestoreManager(ctx, k8sClient, cluster, scheme.Scheme, nil, 3, 0) + err := restoreMGR.DoRestore(synthesizedComponent) Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - }) - testPITR := func() { - baseBackupPhysicalRestore := func() types.NamespacedName { - By("create fullBackup physical restore job") - err := DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) - Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - - By("when base backup restore job completed") - baseBackupJobName := fmt.Sprintf("base-%s", fmt.Sprintf("%s-%s-%s-%d", "data", clusterName, synthesizedComponent.Name, 0)) - baseBackupJobKey := types.NamespacedName{Namespace: cluster.Namespace, Name: baseBackupJobName} - Eventually(testapps.CheckObj(&testCtx, baseBackupJobKey, func(g Gomega, fetched *batchv1.Job) { - envs := fetched.Spec.Template.Spec.Containers[0].Env - var existsTargetENV 
bool - for _, env := range envs { - if env.Name == constant.KBEnvPodName { - existsTargetENV = true - break - } - } - g.Expect(existsTargetENV).Should(BeTrue()) - })).Should(Succeed()) - Eventually(testapps.GetAndChangeObjStatus(&testCtx, baseBackupJobKey, func(fetched *batchv1.Job) { - fetched.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete}} - })).Should(Succeed()) - return baseBackupJobKey - } - - baseBackupLogicalRestore := func() types.NamespacedName { - By("create and wait for fullbackup logical job is completed ") - err := DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) - Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - - By("when logic full backup jobs are completed") - logicJobName := fmt.Sprintf("restore-datafile-logic-%s-%s-0", clusterName, mysqlCompName) - logicJobKey := types.NamespacedName{Namespace: cluster.Namespace, Name: logicJobName} - Eventually(testapps.GetAndChangeObjStatus(&testCtx, logicJobKey, func(fetched *batchv1.Job) { - fetched.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete}} - })).Should(Succeed()) - return logicJobKey - } - - continuousPhysicalRestore := func() types.NamespacedName { - By("create and wait for pitr physical restore job is completed ") - err := DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) - Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - - By("when physical PITR jobs are completed") - jobName := fmt.Sprintf("pitr-phy-data-%s-%s-0", clusterName, mysqlCompName) - jobKey := types.NamespacedName{Namespace: cluster.Namespace, Name: jobName} - Eventually(testapps.GetAndChangeObjStatus(&testCtx, jobKey, func(fetched *batchv1.Job) { - fetched.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete}} - })).Should(Succeed()) - return jobKey - } - - continuousLogicalRestore := func() types.NamespacedName { - By("create and wait for pitr 
logical job is completed ") - err := DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) - Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - - By("mock the podScope is ReadWrite for logic restore") - Expect(testapps.ChangeObj(&testCtx, continuousBackupTool, func(tool *dpv1alpha1.BackupTool) { - tool.Spec.Logical.PodScope = dpv1alpha1.PodRestoreScopeReadWrite - })).Should(Succeed()) - err = DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme) - Expect(intctrlutil.IsTargetError(err, intctrlutil.ErrorTypeNeedWaiting)).Should(BeTrue()) - - By("when logic PITR jobs are completed") - logicJobName := fmt.Sprintf("restore-logfile-logic-%s-%s-0", clusterName, mysqlCompName) - logicJobKey := types.NamespacedName{Namespace: cluster.Namespace, Name: logicJobName} - Eventually(testapps.GetAndChangeObjStatus(&testCtx, logicJobKey, func(fetched *batchv1.Job) { - fetched.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete}} - })).Should(Succeed()) - return logicJobKey - } - cluster.Status.ObservedGeneration = 1 - var backupJobKeys []types.NamespacedName - // do full backup physical restore - if fullBackupTool.Spec.Physical.GetPhysicalRestoreCommand() != nil { - backupJobKeys = append(backupJobKeys, baseBackupPhysicalRestore()) - } - - // do continuous backup physical restore - if continuousBackupTool.Spec.Physical.GetPhysicalRestoreCommand() != nil { - backupJobKeys = append(backupJobKeys, continuousPhysicalRestore()) - } - Expect(DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme)).Should(Succeed()) - - By("when logic PITR jobs are creating after cluster RUNNING") - Eventually(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(cluster), func(fetched *appsv1alpha1.Cluster) { - fetched.Status.Phase = appsv1alpha1.RunningClusterPhase + By("mock restore of prepareData stage to Completed") + restoreMeta := 
restoreMGR.GetRestoreObjectMeta(synthesizedComponent, dpv1alpha1.PrepareData) + namedspace := types.NamespacedName{Name: restoreMeta.Name, Namespace: restoreMeta.Namespace} + Expect(testapps.GetAndChangeObjStatus(&testCtx, namedspace, func(restore *dpv1alpha1.Restore) { + restore.Status.Phase = dpv1alpha1.RestorePhaseCompleted + })()).ShouldNot(HaveOccurred()) + + By("mock cluster phase to Running") + Expect(testapps.ChangeObjStatus(&testCtx, cluster, func() { + cluster.Status.Phase = appsv1alpha1.RunningClusterPhase + cluster.Status.Components = map[string]appsv1alpha1.ClusterComponentStatus{ + mysqlCompName: { + Phase: appsv1alpha1.RunningClusterCompPhase, + }, + } })).Should(Succeed()) - cluster.Status.Phase = appsv1alpha1.RunningClusterPhase - // do full backup logical restore - if fullBackupTool.Spec.Logical.GetLogicalRestoreCommand() != nil { - backupJobKeys = append(backupJobKeys, baseBackupLogicalRestore()) - } - - // do continuous logical restore - if continuousBackupTool.Spec.Logical.GetLogicalRestoreCommand() != nil { - backupJobKeys = append(backupJobKeys, continuousLogicalRestore()) - } - Expect(DoPITR(ctx, testCtx.Cli, cluster, synthesizedComponent, scheme.Scheme)).Should(Succeed()) - - By("expect all jobs are cleaned") - for _, v := range backupJobKeys { - Eventually(testapps.CheckObjExists(&testCtx, v, &batchv1.Job{}, false)).Should(Succeed()) - } - } - - It("Test PITR restore when only support physical restore for full backup", func() { - testPITR() - }) - - It("Test PITR restore when only support physical logical for full backup", func() { - Expect(testapps.ChangeObj(&testCtx, fullBackupTool, func(tool *dpv1alpha1.BackupTool) { - fullBackupTool.Spec.Logical.RestoreCommands = fullBackupTool.Spec.Physical.GetPhysicalRestoreCommand() - fullBackupTool.Spec.Physical.RestoreCommands = nil + By("wait for postReady restore created and mock it to Completed") + restoreMGR.Cluster = cluster + _ = restoreMGR.DoRestore(synthesizedComponent) + + // check if 
restore CR of postReady stage is created. + restoreMeta = restoreMGR.GetRestoreObjectMeta(synthesizedComponent, dpv1alpha1.PostReady) + namedspace = types.NamespacedName{Name: restoreMeta.Name, Namespace: restoreMeta.Namespace} + Eventually(testapps.CheckObjExists(&testCtx, namedspace, + &dpv1alpha1.Restore{}, true)).Should(Succeed()) + // set restore to Completed + Expect(testapps.GetAndChangeObjStatus(&testCtx, namedspace, func(restore *dpv1alpha1.Restore) { + restore.Status.Phase = dpv1alpha1.RestorePhaseCompleted + })()).ShouldNot(HaveOccurred()) + + By("clean up annotations after cluster running") + _ = restoreMGR.DoRestore(synthesizedComponent) + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(cluster), func(g Gomega, tmpCluster *appsv1alpha1.Cluster) { + g.Expect(tmpCluster.Annotations[constant.RestoreFromBackupAnnotationKey]).Should(BeEmpty()) })).Should(Succeed()) - testPITR() }) + }) }) diff --git a/internal/controller/plan/suite_test.go b/internal/controller/plan/suite_test.go index 20f3d6d4ba7..68e486ca709 100644 --- a/internal/controller/plan/suite_test.go +++ b/internal/controller/plan/suite_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/testutil" viper "github.com/apecloud/kubeblocks/internal/viperx" @@ -100,7 +100,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) diff --git a/internal/controller/rsm/utils.go 
b/internal/controller/rsm/utils.go index 9fbb00d68cf..7efc55b7e03 100644 --- a/internal/controller/rsm/utils.go +++ b/internal/controller/rsm/utils.go @@ -243,7 +243,7 @@ func getHeadlessSvcName(rsm workloads.ReplicatedStateMachine) string { } func findSvcPort(rsm workloads.ReplicatedStateMachine) int { - if rsm.Spec.Service == nil { + if rsm.Spec.Service == nil || len(rsm.Spec.Service.Spec.Ports) == 0 { return 0 } port := rsm.Spec.Service.Spec.Ports[0] diff --git a/internal/controllerutil/controller_common.go b/internal/controllerutil/controller_common.go index 45dccb00fc7..1df1ab8c850 100644 --- a/internal/controllerutil/controller_common.go +++ b/internal/controllerutil/controller_common.go @@ -233,8 +233,8 @@ func BackgroundDeleteObject(cli client.Client, ctx context.Context, obj client.O PropagationPolicy: &deletePropagation, } - if err := cli.Delete(ctx, obj, deleteOptions); err != nil && !apierrors.IsNotFound(err) { - return err + if err := cli.Delete(ctx, obj, deleteOptions); err != nil { + return client.IgnoreNotFound(err) } return nil } diff --git a/internal/controllerutil/errors.go b/internal/controllerutil/errors.go index 9cc552195b1..e56e54f071c 100644 --- a/internal/controllerutil/errors.go +++ b/internal/controllerutil/errors.go @@ -22,7 +22,6 @@ package controllerutil import ( "errors" "fmt" - "strings" ) type Error struct { @@ -48,21 +47,12 @@ const ( ErrorTypeRequeue ErrorType = "Requeue" // requeue for reconcile. - // ErrorType for backup - ErrorTypeBackupNotSupported ErrorType = "BackupNotSupported" // this backup type not supported - ErrorTypeBackupPVTemplateNotFound ErrorType = "BackupPVTemplateNotFound" // this pv template not found - ErrorTypeBackupNotCompleted ErrorType = "BackupNotCompleted" // report backup not completed. 
- ErrorTypeBackupPVCNameIsEmpty ErrorType = "BackupPVCNameIsEmpty" // pvc name for backup is empty - ErrorTypeBackupJobFailed ErrorType = "BackupJobFailed" // backup job failed - ErrorTypeStorageNotMatch ErrorType = "ErrorTypeStorageNotMatch" - ErrorTypeReconfigureFailed ErrorType = "ErrorTypeReconfigureFailed" - ErrorTypeInvalidLogfileBackupName ErrorType = "InvalidLogfileBackupName" - ErrorTypeBackupScheduleDisabled ErrorType = "BackupScheduleDisabled" - ErrorTypeLogfileScheduleDisabled ErrorType = "LogfileScheduleDisabled" + ErrorTypeFatal ErrorType = "Fatal" // fatal error // ErrorType for cluster controller - ErrorTypeBackupFailed ErrorType = "BackupFailed" - ErrorTypeNeedWaiting ErrorType = "NeedWaiting" // waiting for next reconcile + ErrorTypeBackupFailed ErrorType = "BackupFailed" + ErrorTypeRestoreFailed ErrorType = "RestoreFailed" + ErrorTypeNeedWaiting ErrorType = "NeedWaiting" // waiting for next reconcile // ErrorType for preflight ErrorTypePreflightCommon = "PreflightCommon" @@ -114,37 +104,7 @@ func IsNotFound(err error) bool { return IsTargetError(err, ErrorTypeNotFound) } -// NewBackupNotSupported returns a new Error with ErrorTypeBackupNotSupported. -func NewBackupNotSupported(backupType, backupPolicyName string) *Error { - return NewErrorf(ErrorTypeBackupNotSupported, `backup type "%s" not supported by backup policy "%s"`, backupType, backupPolicyName) -} - -// NewBackupPVTemplateNotFound returns a new Error with ErrorTypeBackupPVTemplateNotFound. -func NewBackupPVTemplateNotFound(cmName, cmNamespace string) *Error { - return NewErrorf(ErrorTypeBackupPVTemplateNotFound, `"the persistentVolume template is empty in the configMap %s/%s", pvConfig.Namespace, pvConfig.Name`, cmNamespace, cmName) -} - -// NewBackupPVCNameIsEmpty returns a new Error with ErrorTypeBackupPVCNameIsEmpty. 
-func NewBackupPVCNameIsEmpty(backupType, backupPolicyName string) *Error { - return NewErrorf(ErrorTypeBackupPVCNameIsEmpty, `the persistentVolumeClaim name of spec.%s is empty in BackupPolicy "%s"`, strings.ToLower(backupType), backupPolicyName) -} - -// NewBackupJobFailed returns a new Error with ErrorTypeBackupJobFailed. -func NewBackupJobFailed(jobName string) *Error { - return NewErrorf(ErrorTypeBackupJobFailed, `backup job "%s" failed`, jobName) -} - -// NewInvalidLogfileBackupName returns a new Error with ErrorTypeInvalidLogfileBackupName. -func NewInvalidLogfileBackupName(backupPolicyName string) *Error { - return NewErrorf(ErrorTypeInvalidLogfileBackupName, `backup name is incorrect for logfile, you can create the logfile backup by enabling the schedule in BackupPolicy "%s"`, backupPolicyName) -} - -// NewBackupScheduleDisabled returns a new Error with ErrorTypeBackupScheduleDisabled. -func NewBackupScheduleDisabled(backupType, backupPolicyName string) *Error { - return NewErrorf(ErrorTypeBackupScheduleDisabled, `%s schedule is disabled, you can enable spec.schedule.%s in BackupPolicy "%s"`, backupType, backupType, backupPolicyName) -} - -// NewBackupLogfileScheduleDisabled returns a new Error with ErrorTypeLogfileScheduleDisabled. -func NewBackupLogfileScheduleDisabled(backupToolName string) *Error { - return NewErrorf(ErrorTypeLogfileScheduleDisabled, `BackupTool "%s" of the backup relies on logfile. Please enable the logfile scheduling firstly`, backupToolName) +// NewFatalError returns a new Error with ErrorTypeFatal +func NewFatalError(message string) *Error { + return NewErrorf(ErrorTypeFatal, message) } diff --git a/internal/controllerutil/errors_test.go b/internal/controllerutil/errors_test.go index ff9ce39fe81..469df4b7b5b 100644 --- a/internal/controllerutil/errors_test.go +++ b/internal/controllerutil/errors_test.go @@ -20,67 +20,11 @@ along with this program. If not, see . 
package controllerutil import ( - "fmt" "testing" "github.com/pkg/errors" ) -func TestNerError(t *testing.T) { - err1 := NewError(ErrorTypeBackupNotCompleted, "test c2") - if err1.Error() != "test c2" { - t.Error("NewErrorf failed") - } -} - -func TestNerErrorf(t *testing.T) { - err1 := NewErrorf(ErrorTypeBackupNotCompleted, "test %s %s", "c1", "c2") - if err1.Error() != "test c1 c2" { - t.Error("NewErrorf failed") - } - testError := fmt.Errorf("test: %w", err1) - if !errors.Is(testError, err1) { - t.Error("errors.Is failed") - } - - var target *Error - if !errors.As(testError, &target) { - t.Error("errors.As failed") - } -} - -func TestNewErrors(t *testing.T) { - backupNotSupported := NewBackupNotSupported("datafile", "policy-test") - if !IsTargetError(backupNotSupported, ErrorTypeBackupNotSupported) { - t.Error("should be error of BackupNotSupported") - } - pvTemplateNotFound := NewBackupPVTemplateNotFound("configName", "default") - if !IsTargetError(pvTemplateNotFound, ErrorTypeBackupPVTemplateNotFound) { - t.Error("should be error of BackupPVTemplateNotFound") - } - pvsIsEmpty := NewBackupPVCNameIsEmpty("datafile", "policy-test1") - if !IsTargetError(pvsIsEmpty, ErrorTypeBackupPVCNameIsEmpty) { - t.Error("should be error of BackupPVCNameIsEmpty") - } - jobFailed := NewBackupJobFailed("jobName") - if !IsTargetError(jobFailed, ErrorTypeBackupJobFailed) { - t.Error("should be error of BackupJobFailed") - } -} - -func TestUnwrapControllerError(t *testing.T) { - backupNotSupported := NewBackupNotSupported("datafile", "policy-test") - newErr := UnwrapControllerError(backupNotSupported) - if newErr == nil { - t.Error("should unwrap a controller error, but got nil") - } - err := errors.New("test error") - newErr = UnwrapControllerError(err) - if newErr != nil { - t.Errorf("should not unwrap a controller error, but got: %v", newErr) - } -} - func TestIsTargetError(t *testing.T) { var err1 error if IsTargetError(err1, ErrorWaitCacheRefresh) { diff --git 
a/internal/controllerutil/pod_utils.go b/internal/controllerutil/pod_utils.go index 901b84c97a2..68d79701789 100644 --- a/internal/controllerutil/pod_utils.go +++ b/internal/controllerutil/pod_utils.go @@ -428,14 +428,18 @@ func (c ByPodName) Less(i, j int) bool { // BuildPodHostDNS builds the host dns of pod. // ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ func BuildPodHostDNS(pod *corev1.Pod) string { + if pod == nil { + return "" + } // build pod dns string // ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - hostDNS := []string{pod.Name} - if pod.Spec.Hostname != "" { - hostDNS[0] = pod.Spec.Hostname - } if pod.Spec.Subdomain != "" { + hostDNS := []string{pod.Name} + if pod.Spec.Hostname != "" { + hostDNS[0] = pod.Spec.Hostname + } hostDNS = append(hostDNS, pod.Spec.Subdomain) + return strings.Join(hostDNS, ".") } - return strings.Join(hostDNS, ".") + return pod.Status.PodIP } diff --git a/internal/dataprotection/action/action.go b/internal/dataprotection/action/action.go new file mode 100644 index 00000000000..d3e51216b31 --- /dev/null +++ b/internal/dataprotection/action/action.go @@ -0,0 +1,51 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package action + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" +) + +type Action interface { + // Execute executes the action. + Execute(ctx Context) (*dpv1alpha1.ActionStatus, error) + + // GetName returns the Name of the action. + GetName() string + + // Type returns the type of the action. + Type() dpv1alpha1.ActionType +} + +type Context struct { + Ctx context.Context + Client client.Client + Recorder record.EventRecorder + + Scheme *runtime.Scheme + RestClientConfig *rest.Config +} diff --git a/internal/dataprotection/action/action_create_vs.go b/internal/dataprotection/action/action_create_vs.go new file mode 100644 index 00000000000..43ccfcc9e94 --- /dev/null +++ b/internal/dataprotection/action/action_create_vs.go @@ -0,0 +1,261 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package action + +import ( + "context" + "fmt" + "strings" + + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/dataprotection/builder" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" +) + +// CreateVolumeSnapshotAction is an action that creates the volume snapshot. +type CreateVolumeSnapshotAction struct { + // Name is the Name of the action. + Name string + + // Owner is the owner of the volume snapshot. + Owner client.Object + + // ObjectMeta is the metadata of the volume snapshot. + ObjectMeta metav1.ObjectMeta + + // PersistentVolumeClaimWrappers is the list of persistent volume claims wrapper to snapshot. 
+ PersistentVolumeClaimWrappers []PersistentVolumeClaimWrapper +} + +type PersistentVolumeClaimWrapper struct { + VolumeName string + PersistentVolumeClaim corev1.PersistentVolumeClaim +} + +func NewPersistentVolumeClaimWrapper(pvc corev1.PersistentVolumeClaim, volumeName string) PersistentVolumeClaimWrapper { + return PersistentVolumeClaimWrapper{PersistentVolumeClaim: pvc, VolumeName: volumeName} +} + +var configVolumeSnapshotError = []string{ + "Failed to set default snapshot class with error", + "Failed to get snapshot class with error", + "Failed to create snapshot content with error cannot find CSI PersistentVolumeSource for volume", +} + +func (c *CreateVolumeSnapshotAction) GetName() string { + return c.Name +} + +func (c *CreateVolumeSnapshotAction) Type() dpv1alpha1.ActionType { + return dpv1alpha1.ActionTypeNone +} + +func (c *CreateVolumeSnapshotAction) Execute(ctx Context) (*dpv1alpha1.ActionStatus, error) { + sb := newStatusBuilder(c) + handleErr := func(err error) (*dpv1alpha1.ActionStatus, error) { + return sb.withErr(err).build(), err + } + + if err := c.validate(); err != nil { + return handleErr(err) + } + + vsCli := intctrlutil.VolumeSnapshotCompatClient{ + Client: ctx.Client, + Ctx: ctx.Ctx, + } + + var ( + ok bool + err error + snap *vsv1.VolumeSnapshot + ) + for _, w := range c.PersistentVolumeClaimWrappers { + key := client.ObjectKey{ + Namespace: w.PersistentVolumeClaim.Namespace, + Name: dputils.GetBackupVolumeSnapshotName(c.ObjectMeta.Name, w.VolumeName), + } + // create volume snapshot + if err = c.createVolumeSnapshotIfNotExist(ctx, vsCli, &w.PersistentVolumeClaim, key); err != nil { + return handleErr(err) + } + + ok, snap, err = ensureVolumeSnapshotReady(vsCli, key) + if err != nil { + return handleErr(err) + } + + if !ok { + return sb.startTimestamp(&snap.CreationTimestamp).build(), nil + } + } + + // volume snapshot is ready and status is not error + // TODO(ldm): now only support one volume to take snapshot, set its time, size to 
status + return sb.phase(dpv1alpha1.ActionPhaseCompleted). + phase(dpv1alpha1.ActionPhaseCompleted). + totalSize(snap.Status.RestoreSize.String()). + timeRange(snap.Status.CreationTime, snap.Status.CreationTime). + build(), nil +} + +func (c *CreateVolumeSnapshotAction) validate() error { + if len(c.PersistentVolumeClaimWrappers) == 0 { + return errors.New("persistent volume claims are required") + } + if len(c.PersistentVolumeClaimWrappers) > 1 { + return errors.New("only one persistent volume claim is supported") + } + return nil +} + +// createVolumeSnapshotIfNotExist check volume snapshot exists, if not, create it. +func (c *CreateVolumeSnapshotAction) createVolumeSnapshotIfNotExist(ctx Context, + vsCli intctrlutil.VolumeSnapshotCompatClient, + pvc *corev1.PersistentVolumeClaim, + key client.ObjectKey) error { + var ( + err error + vsc *vsv1.VolumeSnapshotClass + ) + + snap := &vsv1.VolumeSnapshot{} + exists, err := vsCli.CheckResourceExists(key, snap) + if err != nil { + return err + } + + // if the volume snapshot already exists, skip creating it. 
+ if exists { + return nil + } + + // create volume snapshot + if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName != "" { + vsc, err = createVolumeSnapshotClassIfNotExist(ctx.Ctx, ctx.Client, vsCli, *pvc.Spec.StorageClassName) + if err != nil { + return err + } + } + + c.ObjectMeta.Name = key.Name + c.ObjectMeta.Namespace = key.Namespace + + // create volume snapshot + snap = &vsv1.VolumeSnapshot{ + ObjectMeta: c.ObjectMeta, + Spec: vsv1.VolumeSnapshotSpec{ + Source: vsv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: &pvc.Name, + }, + }, + } + + if vsc != nil { + snap.Spec.VolumeSnapshotClassName = &vsc.Name + } + + controllerutil.AddFinalizer(snap, dptypes.DataProtectionFinalizerName) + if err = setControllerReference(c.Owner, snap, ctx.Scheme); err != nil { + return err + } + + msg := fmt.Sprintf("creating volume snapshot %s/%s", snap.Namespace, snap.Name) + ctx.Recorder.Event(c.Owner, corev1.EventTypeNormal, "CreatingVolumeSnapshot", msg) + if err = ctx.Client.Create(ctx.Ctx, snap); err != nil { + return err + } + return nil +} + +func createVolumeSnapshotClassIfNotExist( + ctx context.Context, + cli client.Client, + vsCli intctrlutil.VolumeSnapshotCompatClient, + scName string) (*vsv1.VolumeSnapshotClass, error) { + scObj := storagev1.StorageClass{} + // ignore if not found storage class, use the default volume snapshot class + if err := cli.Get(ctx, client.ObjectKey{Name: scName}, &scObj); client.IgnoreNotFound(err) != nil { + return nil, err + } + + vscList := vsv1.VolumeSnapshotClassList{} + if err := vsCli.List(&vscList); err != nil { + return nil, err + } + for _, item := range vscList.Items { + if item.Driver == scObj.Provisioner { + return item.DeepCopy(), nil + } + } + + // not found matched volume snapshot class, create one + vscName := fmt.Sprintf("vsc-%s-%s", scName, scObj.UID[:8]) + newVsc := builder.BuildVolumeSnapshotClass(vscName, scObj.Provisioner) + if err := vsCli.Create(newVsc); err != nil { + return nil, err + } + return 
newVsc, nil +} + +func ensureVolumeSnapshotReady( + vsCli intctrlutil.VolumeSnapshotCompatClient, + key client.ObjectKey) (bool, *vsv1.VolumeSnapshot, error) { + snap := &vsv1.VolumeSnapshot{} + // not found, continue the creation process + exists, err := vsCli.CheckResourceExists(key, snap) + if err != nil { + return false, nil, err + } + if exists && snap.Status != nil { + // check if snapshot status throws an error, e.g. csi does not support volume snapshot + if isVolumeSnapshotConfigError(snap) { + return false, nil, errors.New(*snap.Status.Error.Message) + } + if snap.Status.ReadyToUse != nil && *snap.Status.ReadyToUse { + return true, snap, nil + } + } + return false, snap, nil +} + +func isVolumeSnapshotConfigError(snap *vsv1.VolumeSnapshot) bool { + if snap.Status == nil || snap.Status.Error == nil || snap.Status.Error.Message == nil { + return false + } + for _, errMsg := range configVolumeSnapshotError { + if strings.Contains(*snap.Status.Error.Message, errMsg) { + return true + } + } + return false +} + +var _ Action = &CreateVolumeSnapshotAction{} diff --git a/internal/dataprotection/action/action_create_vs_test.go b/internal/dataprotection/action/action_create_vs_test.go new file mode 100644 index 00000000000..4258ec3fe48 --- /dev/null +++ b/internal/dataprotection/action/action_create_vs_test.go @@ -0,0 +1,102 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package action_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +var _ = Describe("CreateVolumeSnapshotAction Test", func() { + const ( + actionName = "test-create-vs-action" + pvcName = "test-pvc" + volumeName = "test-volume" + ) + + cleanEnv := func() { + By("clean resources") + inNS := client.InNamespace(testCtx.DefaultNamespace) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.VolumeSnapshotSignature, true, inNS) + } + + BeforeEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, testdp.KBToolImage) + }) + + AfterEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, "") + }) + + Context("create action that create volume snapshot", func() { + It("should return error when PVC is empty", func() { + act := &action.CreateVolumeSnapshotAction{} + status, err := act.Execute(buildActionCtx()) + Expect(err).To(HaveOccurred()) + 
Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseFailed)) + }) + + It("should success to execute action", func() { + act := &action.CreateVolumeSnapshotAction{ + Name: actionName, + Owner: testdp.NewFakeBackup(&testCtx, nil), + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: actionName, + }, + PersistentVolumeClaimWrappers: []action.PersistentVolumeClaimWrapper{ + { + PersistentVolumeClaim: *testdp.NewFakePVC(&testCtx, pvcName), + VolumeName: volumeName, + }, + }, + } + + By("execute action, its status should be running") + status, err := act.Execute(buildActionCtx()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseRunning)) + + By("check volume snapshot be created") + key := client.ObjectKey{ + Namespace: testCtx.DefaultNamespace, + Name: dputils.GetBackupVolumeSnapshotName(actionName, volumeName), + } + Eventually(testapps.CheckObjExists(&testCtx, key, &vsv1.VolumeSnapshot{}, true)).Should(Succeed()) + }) + }) +}) diff --git a/internal/dataprotection/action/action_exec.go b/internal/dataprotection/action/action_exec.go new file mode 100644 index 00000000000..c004b7ad3f9 --- /dev/null +++ b/internal/dataprotection/action/action_exec.go @@ -0,0 +1,110 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package action + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +// ExecAction is an action that executes a command on a pod. +// This action will create a job to execute the command. +type ExecAction struct { + JobAction + + // PodName is the Name of the pod to execute the command on. + PodName string + + // Namespace is the Namespace of the pod to execute the command on. + Namespace string + + // Command is the command to execute. + Command []string + + // Container is the container to execute the command on. + Container string + + // ServiceAccountName is the service account to use to build the job object. + ServiceAccountName string + + // Timeout is the timeout for the command. + Timeout metav1.Duration +} + +func (e *ExecAction) Execute(ctx Context) (*dpv1alpha1.ActionStatus, error) { + if err := e.validate(); err != nil { + return nil, err + } + e.JobAction.PodSpec = e.buildPodSpec() + return e.JobAction.Execute(ctx) +} + +func (e *ExecAction) validate() error { + if e.PodName == "" { + return errors.New("pod name is required") + } + if e.Namespace == "" { + return errors.New("namespace is required") + } + if len(e.Command) == 0 { + return errors.New("command is required") + } + return nil +} + +func (e *ExecAction) buildPodSpec() *corev1.PodSpec { + return &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: e.ServiceAccountName, + Containers: []corev1.Container{ + { + Name: e.Name, + Image: viper.GetString(constant.KBToolsImage), + ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), + Command: []string{"kubectl"}, + Args: append([]string{ + "-n", + e.Namespace, + "exec", + e.PodName, + "-c", + e.Container, + "--", + }, e.Command...), + 
}, + }, + Volumes: []corev1.Volume{}, + // tolerate all taints + Tolerations: []corev1.Toleration{ + { + Operator: corev1.TolerationOpExists, + }, + }, + Affinity: &corev1.Affinity{}, + NodeSelector: map[string]string{}, + } +} + +var _ Action = &ExecAction{} diff --git a/internal/dataprotection/action/action_exec_test.go b/internal/dataprotection/action/action_exec_test.go new file mode 100644 index 00000000000..dc35eea160d --- /dev/null +++ b/internal/dataprotection/action/action_exec_test.go @@ -0,0 +1,127 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package action_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +var _ = Describe("ExecAction Test", func() { + const ( + actionName = "test-exec-action" + podName = "pod" + container = "container" + serviceAccountName = "service-account" + ) + + var ( + command = []string{"ls"} + ) + + cleanEnv := func() { + By("clean resources") + inNS := client.InNamespace(testCtx.DefaultNamespace) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS) + testapps.ClearResources(&testCtx, generics.PodSignature, inNS) + } + + BeforeEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, testdp.KBToolImage) + }) + + AfterEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, "") + }) + + Context("create exec action", func() { + It("should return error when pod name is empty", func() { + act := &action.ExecAction{} + status, err := act.Execute(buildActionCtx()) + Expect(err).To(HaveOccurred()) + Expect(status).Should(BeNil()) + }) + + It("should build pod spec but job action validate failed", func() { + act := &action.ExecAction{ + JobAction: action.JobAction{ + Name: actionName, + }, + PodName: podName, + Namespace: testCtx.DefaultNamespace, + Command: command, + } + status, err := act.Execute(buildActionCtx()) + Expect(err).To(HaveOccurred()) + Expect(status).ShouldNot(BeNil()) + 
Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseFailed)) + Expect(act.JobAction.PodSpec).ShouldNot(BeNil()) + }) + + It("should success to build exec action", func() { + labels := map[string]string{ + "dp-test-action": actionName, + } + + act := &action.ExecAction{ + JobAction: action.JobAction{ + Name: actionName, + ObjectMeta: metav1.ObjectMeta{ + Name: actionName, + Namespace: testCtx.DefaultNamespace, + Labels: labels, + }, + Owner: testdp.NewFakeBackup(&testCtx, nil), + }, + PodName: podName, + Namespace: testCtx.DefaultNamespace, + Command: command, + Container: container, + ServiceAccountName: serviceAccountName, + } + + By("should success to execute") + status, err := act.Execute(buildActionCtx()) + Expect(err).Should(Succeed()) + Expect(status).ShouldNot(BeNil()) + Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseRunning)) + + By("check the job was created") + job := &batchv1.Job{} + key := client.ObjectKey{Name: actionName, Namespace: testCtx.DefaultNamespace} + Eventually(testapps.CheckObjExists(&testCtx, key, job, true)).Should(Succeed()) + }) + }) +}) diff --git a/internal/dataprotection/action/action_job.go b/internal/dataprotection/action/action_job.go new file mode 100644 index 00000000000..931ccfb18d7 --- /dev/null +++ b/internal/dataprotection/action/action_job.go @@ -0,0 +1,137 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package action + +import ( + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ref "k8s.io/client-go/tools/reference" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/dataprotection/types" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" +) + +// JobAction is an action that creates a batch job. +type JobAction struct { + // Name is the Name of the action. + Name string + + // Owner is the owner of the job. + Owner client.Object + + // ObjectMeta is the metadata of the job. + ObjectMeta metav1.ObjectMeta + + // PodSpec is the + PodSpec *corev1.PodSpec + + // BackOffLimit is the number of retries before considering a JobAction as failed. 
+ BackOffLimit *int32 +} + +func (j *JobAction) GetName() string { + return j.Name +} + +func (j *JobAction) Type() dpv1alpha1.ActionType { + return dpv1alpha1.ActionTypeJob +} + +func (j *JobAction) Execute(ctx Context) (*dpv1alpha1.ActionStatus, error) { + sb := newStatusBuilder(j) + handleErr := func(err error) (*dpv1alpha1.ActionStatus, error) { + return sb.withErr(err).build(), err + } + + if err := j.validate(); err != nil { + return handleErr(err) + } + + key := client.ObjectKey{ + Namespace: j.ObjectMeta.Namespace, + Name: j.ObjectMeta.Name, + } + original := batchv1.Job{} + exists, err := ctrlutil.CheckResourceExists(ctx.Ctx, ctx.Client, key, &original) + if err != nil { + return handleErr(err) + } else if exists { + // job exists, check job status and set action status accordingly + objRef, _ := ref.GetReference(ctx.Scheme, &original) + sb = sb.startTimestamp(&original.CreationTimestamp).objectRef(objRef) + _, finishedType, msg := utils.IsJobFinished(&original) + switch finishedType { + case batchv1.JobComplete: + return sb.phase(dpv1alpha1.ActionPhaseCompleted). + completionTimestamp(nil). + reason(""). + build(), nil + case batchv1.JobFailed: + return sb.phase(dpv1alpha1.ActionPhaseFailed). + completionTimestamp(nil). + reason(msg). 
+ build(), nil + } + // job is running + return sb.build(), nil + } + + // job doesn't exist, create it + job := &batchv1.Job{ + ObjectMeta: j.ObjectMeta, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: j.ObjectMeta, + Spec: *j.PodSpec, + }, + BackoffLimit: j.BackOffLimit, + }, + } + + controllerutil.AddFinalizer(job, types.DataProtectionFinalizerName) + if err = setControllerReference(j.Owner, job, ctx.Scheme); err != nil { + return handleErr(err) + } + msg := fmt.Sprintf("creating job %s/%s", job.Namespace, job.Name) + ctx.Recorder.Event(j.Owner, corev1.EventTypeNormal, "CreatingJob", msg) + return handleErr(client.IgnoreAlreadyExists(ctx.Client.Create(ctx.Ctx, job))) +} + +func (j *JobAction) validate() error { + if j.ObjectMeta.Name == "" { + return fmt.Errorf("name is required") + } + if j.PodSpec == nil { + return fmt.Errorf("PodSpec is required") + } + if j.BackOffLimit == nil { + j.BackOffLimit = &types.DefaultBackOffLimit + } + return nil +} + +var _ Action = &JobAction{} diff --git a/internal/dataprotection/action/action_job_test.go b/internal/dataprotection/action/action_job_test.go new file mode 100644 index 00000000000..f2604c096b1 --- /dev/null +++ b/internal/dataprotection/action/action_job_test.go @@ -0,0 +1,120 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package action_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +var _ = Describe("JobAction Test", func() { + const ( + actionName = "test-job-action" + container = "container" + ) + + var ( + command = []string{"ls"} + ) + + cleanEnv := func() { + By("clean resources") + inNS := client.InNamespace(testCtx.DefaultNamespace) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + } + + BeforeEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, testdp.KBToolImage) + }) + + AfterEach(func() { + cleanEnv() + viper.Set(constant.KBToolsImage, "") + }) + + Context("create job action", func() { + It("should return error when pod spec is empty", func() { + act := &action.JobAction{} + status, err := act.Execute(buildActionCtx()) + Expect(err).To(HaveOccurred()) + Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseFailed)) + }) + + It("should success to execute job action", func() { + labels := map[string]string{ + "dp-test-action": actionName, + } + + act := &action.JobAction{ + Name: actionName, + ObjectMeta: metav1.ObjectMeta{ + Name: actionName, + Namespace: testCtx.DefaultNamespace, + Labels: labels, + }, + PodSpec: &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: 
container, + Image: testdp.KBToolImage, + Command: command, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + Owner: testdp.NewFakeBackup(&testCtx, nil), + } + + By("should success to execute") + status, err := act.Execute(buildActionCtx()) + Expect(err).Should(Succeed()) + Expect(status).ShouldNot(BeNil()) + Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseRunning)) + + By("check the job was created") + job := &batchv1.Job{} + key := client.ObjectKey{Name: actionName, Namespace: testCtx.DefaultNamespace} + Eventually(testapps.CheckObjExists(&testCtx, key, job, true)).Should(Succeed()) + + By("set job status to complete") + testdp.PatchK8sJobStatus(&testCtx, client.ObjectKeyFromObject(job), batchv1.JobComplete) + + By("action status should be completed") + status, err = act.Execute(buildActionCtx()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(status.Phase).Should(Equal(dpv1alpha1.ActionPhaseCompleted)) + }) + }) +}) diff --git a/internal/dataprotection/action/builder_status.go b/internal/dataprotection/action/builder_status.go new file mode 100644 index 00000000000..a249f04ec92 --- /dev/null +++ b/internal/dataprotection/action/builder_status.go @@ -0,0 +1,105 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package action + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" +) + +type statusBuilder struct { + status *dpv1alpha1.ActionStatus +} + +func newStatusBuilder(a Action) *statusBuilder { + sb := &statusBuilder{ + status: &dpv1alpha1.ActionStatus{ + Name: a.GetName(), + ActionType: a.Type(), + Phase: dpv1alpha1.ActionPhaseRunning, + }, + } + return sb.startTimestamp(nil) +} + +func (b *statusBuilder) phase(phase dpv1alpha1.ActionPhase) *statusBuilder { + b.status.Phase = phase + return b +} + +func (b *statusBuilder) reason(reason string) *statusBuilder { + b.status.FailureReason = reason + return b +} + +func (b *statusBuilder) startTimestamp(timestamp *metav1.Time) *statusBuilder { + t := timestamp + if t == nil { + t = &metav1.Time{ + Time: metav1.Now().UTC(), + } + } + b.status.StartTimestamp = t + return b +} + +func (b *statusBuilder) completionTimestamp(timestamp *metav1.Time) *statusBuilder { + t := timestamp + if t == nil { + t = &metav1.Time{ + Time: metav1.Now().UTC(), + } + } + b.status.CompletionTimestamp = t + return b +} + +func (b *statusBuilder) objectRef(objectRef *corev1.ObjectReference) *statusBuilder { + b.status.ObjectRef = objectRef + return b +} + +func (b *statusBuilder) withErr(err error) *statusBuilder { + if err == nil { + return b + } + b.status.FailureReason = err.Error() + b.status.Phase = dpv1alpha1.ActionPhaseFailed + return b +} + +func (b *statusBuilder) totalSize(size string) *statusBuilder { + b.status.TotalSize = size + return b +} + +func (b *statusBuilder) timeRange(start, end *metav1.Time) *statusBuilder { + b.status.TimeRange = &dpv1alpha1.BackupTimeRange{ + Start: start, + End: end, + } + return b +} + +func (b *statusBuilder) build() *dpv1alpha1.ActionStatus { + return b.status +} diff --git a/internal/dataprotection/action/suite_test.go b/internal/dataprotection/action/suite_test.go new file mode 
// ---- file: internal/dataprotection/action/suite_test.go ----

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package action_test

import (
	"context"
	"go/build"
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/go-logr/logr"
	vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	"go.uber.org/zap/zapcore"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
	dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
	ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil"
	"github.com/apecloud/kubeblocks/internal/dataprotection/action"
	"github.com/apecloud/kubeblocks/internal/testutil"
	viper "github.com/apecloud/kubeblocks/internal/viperx"
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

// Suite-wide state shared by all specs in package action_test.
var (
	cfg       *rest.Config
	k8sClient client.Client
	testEnv   *envtest.Environment
	ctx       context.Context
	cancel    context.CancelFunc
	testCtx   testutil.TestContext
	logger    logr.Logger
	recorder  record.EventRecorder

	// buildActionCtx builds an action.Context wired to the envtest client;
	// shared by every spec in this suite.
	buildActionCtx = func() action.Context {
		return action.Context{
			Ctx:              testCtx.Ctx,
			Client:           testCtx.Cli,
			Recorder:         recorder,
			Scheme:           testEnv.Scheme,
			RestClientConfig: cfg,
		}
	}
)

func init() {
	viper.AutomaticEnv()
}

// TestAction is the go test entry point that hands control to Ginkgo.
func TestAction(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Data Protection Action Suite")
}

var _ = BeforeSuite(func() {
	if viper.GetBool("ENABLE_DEBUG_LOG") {
		logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) {
			o.TimeEncoder = zapcore.ISO8601TimeEncoder
		}))
	}

	ctx, cancel = context.WithCancel(context.TODO())
	logger = logf.FromContext(ctx).WithValues()
	logger.Info("logger start")

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths: []string{
			filepath.Join("..", "..", "..", "config", "crd", "bases"),
			// use dependent external crds.
			// resolved by ref: https://github.com/operator-framework/operator-sdk/issues/4434#issuecomment-786794418
			filepath.Join(build.Default.GOPATH, "pkg", "mod", "github.com", "kubernetes-csi/external-snapshotter/",
				"client/v6@v6.2.0", "config", "crd"),
		},
		ErrorIfCRDPathMissing: true,
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = appsv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	err = vsv1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	err = dpv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

	// run reconcile
	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme:                scheme.Scheme,
		MetricsBindAddress:    "0",
		ClientDisableCacheFor: ctrlutil.GetUncachedObjects(),
	})
	Expect(err).ToNot(HaveOccurred())

	testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv)
	recorder = k8sManager.GetEventRecorderFor("dataprotection-action-test")

	// The manager runs in its own goroutine for the lifetime of the suite;
	// AfterSuite's cancel() stops it.
	go func() {
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
	}()
})

var _ = AfterSuite(func() {
	cancel()
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

// ---- file: internal/dataprotection/action/types.go ----

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package action

// ErrorMode is a string-typed enum with two declared values, Continue and
// Fail — presumably selecting whether action execution continues or aborts
// after a failure; verify against the callers.
type ErrorMode string

const (
	ErrorModeContinue ErrorMode = "Continue"
	ErrorModeFail     ErrorMode = "Fail"
)

// ---- file: internal/dataprotection/action/utils.go ----

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package action

import (
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setControllerReference sets owner as the controller owner-reference on
// controlled. A nil owner is a no-op; the reflect check also catches a
// "typed nil" stored in the interface, which a plain == nil comparison
// would miss.
func setControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error {
	if owner == nil || reflect.ValueOf(owner).IsNil() {
		return nil
	}
	return ctrlutil.SetControllerReference(owner, controlled, scheme)
}

// ---- file: internal/dataprotection/backup/deleter.go ----

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/ + +package backup + +import ( + "fmt" + "strings" + + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +const ( + deleteBackupFilesJobNamePrefix = "delete-" +) + +type DeletionStatus string + +const ( + DeletionStatusDeleting DeletionStatus = "Deleting" + DeletionStatusFailed DeletionStatus = "Failed" + DeletionStatusSucceeded DeletionStatus = "Succeeded" + DeletionStatusUnknown DeletionStatus = "Unknown" +) + +type Deleter struct { + ctrlutil.RequestCtx + Client client.Client + Scheme *runtime.Scheme +} + +func (d *Deleter) DeleteBackupFiles(backup *dpv1alpha1.Backup) (DeletionStatus, error) { + jobKey := BuildDeleteBackupFilesJobKey(backup) + job := &batchv1.Job{} + exists, err := ctrlutil.CheckResourceExists(d.Ctx, d.Client, jobKey, job) + if err != nil { + return DeletionStatusUnknown, err + } + + // if deletion job exists, check its status + if exists { + _, finishedType, msg := utils.IsJobFinished(job) + switch finishedType { + case batchv1.JobComplete: + return DeletionStatusSucceeded, nil + case batchv1.JobFailed: + return DeletionStatusFailed, + fmt.Errorf("deletion backup files job \"%s\" failed, you can delete it to 
re-delete the backup files, %s", job.Name, msg) + } + return DeletionStatusDeleting, nil + } + + // if deletion job not exists, create it + pvcName := backup.Status.PersistentVolumeClaimName + if pvcName == "" { + d.Log.Info("skip deleting backup files because PersistentVolumeClaimName is empty", + "backup", backup.Name) + return DeletionStatusSucceeded, nil + } + + // check if backup repo PVC exists, if not, skip to delete backup files + pvcKey := client.ObjectKey{Namespace: backup.Namespace, Name: pvcName} + if err = d.Client.Get(d.Ctx, pvcKey, &corev1.PersistentVolumeClaim{}); err != nil { + if apierrors.IsNotFound(err) { + return DeletionStatusSucceeded, nil + } + return DeletionStatusUnknown, err + } + + backupFilePath := backup.Status.Path + if backupFilePath == "" || !strings.Contains(backupFilePath, backup.Name) { + // For compatibility: the FilePath field is changing from time to time, + // and it may not contain the backup name as a path component if the Backup object + // was created in a previous version. In this case, it's dangerous to execute + // the deletion command. For example, files belongs to other Backups can be deleted as well. + d.Log.Info("skip deleting backup files because backup file path is invalid", + "backupFilePath", backupFilePath, "backup", backup.Name) + return DeletionStatusSucceeded, nil + } + return DeletionStatusDeleting, d.createDeleteBackupFileJob(jobKey, backup, pvcName, backup.Status.Path) +} + +func (d *Deleter) createDeleteBackupFileJob( + jobKey types.NamespacedName, + backup *dpv1alpha1.Backup, + backupPVCName string, + backupFilePath string) error { + // make sure the path has a leading slash + if !strings.HasPrefix(backupFilePath, "/") { + backupFilePath = "/" + backupFilePath + } + + // this script first deletes the directory where the backup is located (including files + // in the directory), and then traverses up the path level by level to clean up empty directories. 
+ deleteScript := fmt.Sprintf(` + backupPathBase=%s; + targetPath="${backupPathBase}%s"; + + echo "removing backup files in ${targetPath}"; + rm -rf "${targetPath}"; + + absBackupPathBase=$(realpath "${backupPathBase}"); + curr=$(realpath "${targetPath}"); + while true; do + parent=$(dirname "${curr}"); + if [ "${parent}" == "${absBackupPathBase}" ]; then + echo "reach backupPathBase ${backupPathBase}, done"; + break; + fi; + if [ ! "$(ls -A "${parent}")" ]; then + echo "${parent} is empty, removing it..."; + rmdir "${parent}"; + else + echo "${parent} is not empty, done"; + break; + fi; + curr="${parent}"; + done + `, RepoVolumeMountPath, backupFilePath) + + runAsUser := int64(0) + container := corev1.Container{ + Name: backup.Name, + Command: []string{"sh", "-c"}, + Args: []string{deleteScript}, + Image: viper.GetString(constant.KBToolsImage), + ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: boolptr.False(), + RunAsUser: &runAsUser, + }, + VolumeMounts: []corev1.VolumeMount{ + buildBackupRepoVolumeMount(backupPVCName), + }, + } + ctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) + + // build pod + podSpec := corev1.PodSpec{ + Containers: []corev1.Container{container}, + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + buildBackupRepoVolume(backupPVCName), + }, + } + + if err := utils.AddTolerations(&podSpec); err != nil { + return err + } + + // build job + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: jobKey.Namespace, + Name: jobKey.Name, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: jobKey.Namespace, + Name: jobKey.Name, + }, + Spec: podSpec, + }, + BackoffLimit: &dptypes.DefaultBackOffLimit, + }, + } + if err := controllerutil.SetControllerReference(backup, job, d.Scheme); err != nil { + return err + } + d.Log.V(1).Info("create a job to delete 
backup files", "job", job) + return client.IgnoreAlreadyExists(d.Client.Create(d.Ctx, job)) +} + +func (d *Deleter) DeleteVolumeSnapshots(backup *dpv1alpha1.Backup) error { + // initialize volume snapshot client that is compatible with both v1beta1 and v1 + vsCli := &ctrlutil.VolumeSnapshotCompatClient{ + Client: d.Client, + Ctx: d.Ctx, + } + + snaps := &vsv1.VolumeSnapshotList{} + if err := vsCli.List(snaps, client.InNamespace(backup.Namespace), + client.MatchingLabels(BuildBackupWorkloadLabels(backup))); err != nil { + return client.IgnoreNotFound(err) + } + + deleteVolumeSnapshot := func(vs *vsv1.VolumeSnapshot) error { + if controllerutil.ContainsFinalizer(vs, dptypes.DataProtectionFinalizerName) { + patch := vs.DeepCopy() + controllerutil.RemoveFinalizer(vs, dptypes.DataProtectionFinalizerName) + if err := vsCli.Patch(vs, patch); err != nil { + return err + } + } + if !vs.DeletionTimestamp.IsZero() { + return nil + } + d.Log.V(1).Info("delete volume snapshot", "volume snapshot", vs) + if err := vsCli.Delete(vs); err != nil { + return err + } + return nil + } + + for i := range snaps.Items { + if err := deleteVolumeSnapshot(&snaps.Items[i]); err != nil { + return err + } + } + return nil +} + +func BuildDeleteBackupFilesJobKey(backup *dpv1alpha1.Backup) client.ObjectKey { + jobName := fmt.Sprintf("%s-%s%s", backup.UID[:8], deleteBackupFilesJobNamePrefix, backup.Name) + if len(jobName) > 63 { + jobName = jobName[:63] + } + return client.ObjectKey{Namespace: backup.Namespace, Name: jobName} +} diff --git a/internal/dataprotection/backup/deleter_test.go b/internal/dataprotection/backup/deleter_test.go new file mode 100644 index 00000000000..b4988c3ece3 --- /dev/null +++ b/internal/dataprotection/backup/deleter_test.go @@ -0,0 +1,160 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as 
published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package backup

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
	"github.com/apecloud/kubeblocks/internal/constant"
	ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil"
	"github.com/apecloud/kubeblocks/internal/generics"
	testapps "github.com/apecloud/kubeblocks/internal/testutil/apps"
	testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection"
	viper "github.com/apecloud/kubeblocks/internal/viperx"
)

// Ginkgo spec for Deleter: covers the skip conditions of DeleteBackupFiles
// (no PVC name, no path, PVC missing), the deletion-job creation path, and
// DeleteVolumeSnapshots with and without existing snapshots.
var _ = Describe("Backup Deleter Test", func() {
	const (
		backupRepoPVCName = "backup-repo-pvc"
		backupPath        = "/backup/test-backup"
		backupVSName      = "backup-vs"
		backupPVCName     = "backup-pvc"
	)

	// buildDeleter wires a Deleter to the suite's envtest client and recorder.
	buildDeleter := func() *Deleter {
		return &Deleter{
			RequestCtx: ctrlutil.RequestCtx{
				Log:      logger,
				Ctx:      testCtx.Ctx,
				Recorder: recorder,
			},
			Scheme: testEnv.Scheme,
			Client: testCtx.Cli,
		}
	}

	cleanEnv := func() {
		By("clean resources")
		inNS := client.InNamespace(testCtx.DefaultNamespace)
		testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS)
		testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS)
		testapps.ClearResources(&testCtx, generics.VolumeSnapshotSignature, inNS)
	}

	BeforeEach(func() {
		cleanEnv()
		viper.Set(constant.KBToolsImage, testdp.KBToolImage)
	})

	AfterEach(func() {
		cleanEnv()
		viper.Set(constant.KBToolsImage, "")
	})

	Context("delete backup file", func() {
		var (
			backup  *dpv1alpha1.Backup
			deleter *Deleter
		)

		BeforeEach(func() {
			backup = testdp.NewFakeBackup(&testCtx, nil)
			deleter = buildDeleter()
		})

		It("should success when backup status PVC is empty", func() {
			// No PVC recorded -> nothing to delete -> immediate success.
			Expect(backup.Status.PersistentVolumeClaimName).Should(Equal(""))
			status, err := deleter.DeleteBackupFiles(backup)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(status).Should(Equal(DeletionStatusSucceeded))
		})

		It("should success when backup status path is empty", func() {
			backup.Status.PersistentVolumeClaimName = backupRepoPVCName
			Expect(backup.Status.Path).Should(Equal(""))
			status, err := deleter.DeleteBackupFiles(backup)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(status).Should(Equal(DeletionStatusSucceeded))
		})

		It("should success when PVC does not exist", func() {
			// The PVC name is set but the PVC itself was never created.
			backup.Status.PersistentVolumeClaimName = backupRepoPVCName
			backup.Status.Path = backupPath
			status, err := deleter.DeleteBackupFiles(backup)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(status).Should(Equal(DeletionStatusSucceeded))
		})

		It("should create job to delete backup file", func() {
			By("mock backup repo PVC")
			backupRepoPVC := testdp.NewFakePVC(&testCtx, backupRepoPVCName)

			By("delete backup file")
			backup.Status.PersistentVolumeClaimName = backupRepoPVC.Name
			backup.Status.Path = backupPath
			status, err := deleter.DeleteBackupFiles(backup)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(status).Should(Equal(DeletionStatusDeleting))

			By("check job exist")
			job := &batchv1.Job{}
			key := BuildDeleteBackupFilesJobKey(backup)
			Eventually(testapps.CheckObjExists(&testCtx, key, job, true)).Should(Succeed())
		})
	})

	Context("delete volume snapshots", func() {
		var (
			backup  *dpv1alpha1.Backup
			deleter *Deleter
		)

		BeforeEach(func() {
			backup = testdp.NewFakeBackup(&testCtx, nil)
			deleter = buildDeleter()
		})

		It("should success when volume snapshot does not exist", func() {
			Expect(deleter.DeleteVolumeSnapshots(backup)).Should(Succeed())
		})

		It("should success when volume snapshot exist", func() {
			By("mock volume snapshot")
			// The snapshot carries the backup's workload labels so the deleter
			// finds it via label selector.
			vs := testdp.NewVolumeSnapshotFactory(testCtx.DefaultNamespace, backupVSName).
				SetSourcePVCName(backupPVCName).
				AddLabelsInMap(BuildBackupWorkloadLabels(backup)).
				Create(&testCtx).GetObject()
			Eventually(testapps.CheckObjExists(&testCtx,
				client.ObjectKeyFromObject(vs), vs, true)).Should(Succeed())

			By("delete volume snapshot")
			Expect(deleter.DeleteVolumeSnapshots(backup)).Should(Succeed())

			By("check volume snapshot deleted")
			Eventually(testapps.CheckObjExists(&testCtx,
				client.ObjectKeyFromObject(vs), vs, false)).Should(Succeed())
		})
	})
})

// ---- file: internal/dataprotection/backup/request.go ----

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/ + +package backup + +import ( + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +const ( + BackupDataJobNamePrefix = "dp-backup" + prebackupJobNamePrefix = "dp-prebackup" + postbackupJobNamePrefix = "dp-postbackup" + backupDataContainerName = "backupdata" + syncProgressContainerName = "sync-progress" +) + +// Request is a request for a backup, with all references to other objects. +type Request struct { + *dpv1alpha1.Backup + intctrlutil.RequestCtx + + Client client.Client + BackupPolicy *dpv1alpha1.BackupPolicy + BackupMethod *dpv1alpha1.BackupMethod + ActionSet *dpv1alpha1.ActionSet + TargetPods []*corev1.Pod + BackupRepoPVC *corev1.PersistentVolumeClaim + BackupRepo *dpv1alpha1.BackupRepo +} + +func (r *Request) GetBackupType() string { + if r.ActionSet != nil { + return string(r.ActionSet.Spec.BackupType) + } + if r.BackupMethod != nil && boolptr.IsSetToTrue(r.BackupMethod.SnapshotVolumes) { + return string(dpv1alpha1.BackupTypeFull) + } + return "" +} + +// BuildActions builds the actions for the backup. 
+func (r *Request) BuildActions() ([]action.Action, error) { + var actions []action.Action + + appendIgnoreNil := func(elems ...action.Action) { + for _, elem := range elems { + if elem == nil || reflect.ValueOf(elem).IsNil() { + continue + } + actions = append(actions, elem) + } + } + + // build pre-backup actions + preBackupActions, err := r.buildPreBackupActions() + if err != nil { + return nil, err + } + + // build backup data action + backupDataAction, err := r.buildBackupDataAction() + if err != nil { + return nil, err + } + + // build create volume snapshot action + createVolumeSnapshotAction, err := r.buildCreateVolumeSnapshotAction() + if err != nil { + return nil, err + } + + // build backup kubernetes resources action + backupKubeResourcesAction, err := r.buildBackupKubeResourcesAction() + if err != nil { + return nil, err + } + + // build post-backup actions + postBackupActions, err := r.buildPostBackupActions() + if err != nil { + return nil, err + } + + appendIgnoreNil(preBackupActions...) + appendIgnoreNil(backupDataAction, createVolumeSnapshotAction, backupKubeResourcesAction) + appendIgnoreNil(postBackupActions...) 
+ return actions, nil +} + +func (r *Request) buildPreBackupActions() ([]action.Action, error) { + if !r.backupActionSetExists() || + len(r.ActionSet.Spec.Backup.PreBackup) == 0 { + return nil, nil + } + + var actions []action.Action + for i, preBackup := range r.ActionSet.Spec.Backup.PreBackup { + a, err := r.buildAction(fmt.Sprintf("%s-%d", prebackupJobNamePrefix, i), &preBackup) + if err != nil { + return nil, err + } + actions = append(actions, a) + } + return actions, nil +} + +func (r *Request) buildPostBackupActions() ([]action.Action, error) { + if !r.backupActionSetExists() || + len(r.ActionSet.Spec.Backup.PostBackup) == 0 { + return nil, nil + } + + var actions []action.Action + for i, postBackup := range r.ActionSet.Spec.Backup.PostBackup { + a, err := r.buildAction(fmt.Sprintf("%s-%d", postbackupJobNamePrefix, i), &postBackup) + if err != nil { + return nil, err + } + actions = append(actions, a) + } + return actions, nil +} + +func (r *Request) buildBackupDataAction() (action.Action, error) { + if !r.backupActionSetExists() || + r.ActionSet.Spec.Backup.BackupData == nil { + return nil, nil + } + + backupDataAct := r.ActionSet.Spec.Backup.BackupData + podSpec := r.buildJobActionPodSpec(backupDataContainerName, &backupDataAct.JobActionSpec) + if backupDataAct.SyncProgress != nil { + r.injectSyncProgressContainer(podSpec, backupDataAct.SyncProgress) + } + + if r.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeFull { + return &action.JobAction{ + Name: BackupDataJobNamePrefix, + ObjectMeta: *buildBackupJobObjMeta(r.Backup, BackupDataJobNamePrefix), + Owner: r.Backup, + PodSpec: podSpec, + BackOffLimit: r.BackupPolicy.Spec.BackoffLimit, + }, nil + } + return nil, fmt.Errorf("unsupported backup type %s", r.ActionSet.Spec.BackupType) +} + +func (r *Request) buildCreateVolumeSnapshotAction() (action.Action, error) { + targetPod := r.TargetPods[0] + if r.BackupMethod == nil || + !boolptr.IsSetToTrue(r.BackupMethod.SnapshotVolumes) { + return nil, nil + } + + 
if r.BackupMethod.TargetVolumes == nil { + return nil, fmt.Errorf("targetVolumes is required for snapshotVolumes") + } + + if !utils.VolumeSnapshotEnabled() { + return nil, fmt.Errorf("volume snapshot is not enabled") + } + + pvcs, err := getPVCsByVolumeNames(r.Client, targetPod, r.BackupMethod.TargetVolumes.Volumes) + if err != nil { + return nil, err + } + + if len(pvcs) == 0 { + return nil, fmt.Errorf("no PVCs found for pod %s to back up", targetPod.Name) + } + + return &action.CreateVolumeSnapshotAction{ + Name: "createVolumeSnapshot", + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.Backup.Namespace, + Name: r.Backup.Name, + Labels: BuildBackupWorkloadLabels(r.Backup), + }, + Owner: r.Backup, + PersistentVolumeClaimWrappers: pvcs, + }, nil +} + +// TODO(ldm): implement this +func (r *Request) buildBackupKubeResourcesAction() (action.Action, error) { + return nil, nil +} + +func (r *Request) buildAction(name string, act *dpv1alpha1.ActionSpec) (action.Action, error) { + if act.Exec == nil && act.Job == nil { + return nil, fmt.Errorf("action %s has no exec or job", name) + } + if act.Exec != nil && act.Job != nil { + return nil, fmt.Errorf("action %s should have only one of exec or job", name) + } + switch { + case act.Exec != nil: + return r.buildExecAction(name, act.Exec), nil + case act.Job != nil: + return r.buildJobAction(name, act.Job) + } + return nil, nil +} + +func (r *Request) buildExecAction(name string, exec *dpv1alpha1.ExecActionSpec) action.Action { + targetPod := r.TargetPods[0] + return &action.ExecAction{ + JobAction: action.JobAction{ + Name: name, + ObjectMeta: *buildBackupJobObjMeta(r.Backup, name), + Owner: r.Backup, + }, + Command: exec.Command, + Container: exec.Container, + Namespace: targetPod.Namespace, + PodName: targetPod.Name, + Timeout: exec.Timeout, + ServiceAccountName: r.targetServiceAccountName(), + } +} + +func (r *Request) buildJobAction(name string, job *dpv1alpha1.JobActionSpec) (action.Action, error) { + return 
&action.JobAction{ + Name: name, + ObjectMeta: *buildBackupJobObjMeta(r.Backup, name), + Owner: r.Backup, + PodSpec: r.buildJobActionPodSpec(name, job), + BackOffLimit: r.BackupPolicy.Spec.BackoffLimit, + }, nil +} + +func (r *Request) buildJobActionPodSpec(name string, job *dpv1alpha1.JobActionSpec) *corev1.PodSpec { + targetPod := r.TargetPods[0] + // build environment variables, include built-in envs, envs from backupMethod + // and envs from actionSet. Latter will override former for the same name. + // env from backupMethod has the highest priority. + buildEnv := func() []corev1.EnvVar { + envVars := []corev1.EnvVar{ + { + Name: dptypes.DPBackupName, + Value: r.Backup.Name, + }, + { + Name: dptypes.DPBackupDIR, + Value: buildBackupPathInContainer(r.Backup, r.BackupPolicy.Spec.PathPrefix), + }, + { + Name: dptypes.DPTargetPodName, + Value: targetPod.Name, + }, + { + Name: dptypes.DPTTL, + Value: r.Spec.RetentionPeriod.String(), + }, + } + envVars = append(envVars, utils.BuildEnvByCredential(targetPod, r.BackupPolicy.Spec.Target.ConnectionCredential)...) + if r.ActionSet != nil { + envVars = append(envVars, r.ActionSet.Spec.Env...) + } + return utils.MergeEnv(envVars, r.BackupMethod.Env) + } + + buildVolumes := func() []corev1.Volume { + return append([]corev1.Volume{ + buildBackupRepoVolume(r.BackupRepoPVC.Name), + }, getVolumesByVolumeInfo(targetPod, r.BackupMethod.TargetVolumes)...) + } + + buildVolumeMounts := func() []corev1.VolumeMount { + return append([]corev1.VolumeMount{ + buildBackupRepoVolumeMount(r.BackupRepoPVC.Name), + }, getVolumeMountsByVolumeInfo(targetPod, r.BackupMethod.TargetVolumes)...) 
+ } + + runAsUser := int64(0) + container := corev1.Container{ + Name: name, + Image: job.Image, + Command: job.Command, + Env: buildEnv(), + VolumeMounts: buildVolumeMounts(), + ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: boolptr.False(), + RunAsUser: &runAsUser, + }, + } + + if r.BackupMethod.RuntimeSettings != nil { + container.Resources = r.BackupMethod.RuntimeSettings.Resources + } + + if r.ActionSet != nil { + container.EnvFrom = r.ActionSet.Spec.EnvFrom + } + + intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) + + podSpec := &corev1.PodSpec{ + Containers: []corev1.Container{container}, + Volumes: buildVolumes(), + ServiceAccountName: r.targetServiceAccountName(), + RestartPolicy: corev1.RestartPolicyNever, + + // tolerate all taints + Tolerations: []corev1.Toleration{ + { + Operator: corev1.TolerationOpExists, + }, + }, + } + + if boolptr.IsSetToTrue(job.RunOnTargetPodNode) { + podSpec.NodeSelector = map[string]string{ + corev1.LabelHostname: targetPod.Spec.NodeName, + } + } + return podSpec +} + +// injectSyncProgressContainer injects a container to sync the backup progress. 
+func (r *Request) injectSyncProgressContainer(podSpec *corev1.PodSpec, + sync *dpv1alpha1.SyncProgress) { + if !boolptr.IsSetToTrue(sync.Enabled) { + return + } + + // build container to sync backup progress that will update the backup status + container := podSpec.Containers[0].DeepCopy() + container.Name = syncProgressContainerName + container.Image = viper.GetString(constant.KBToolsImage) + container.ImagePullPolicy = corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)) + container.Resources = corev1.ResourceRequirements{Limits: nil, Requests: nil} + intctrlutil.InjectZeroResourcesLimitsIfEmpty(container) + container.Command = []string{"sh", "-c"} + + // append some envs + checkIntervalSeconds := int32(5) + if sync.IntervalSeconds != nil && *sync.IntervalSeconds > 0 { + checkIntervalSeconds = *sync.IntervalSeconds + } + container.Env = append(container.Env, + corev1.EnvVar{ + Name: dptypes.DPBackupInfoFile, + Value: buildBackupInfoFilePath(r.Backup, r.BackupPolicy.Spec.PathPrefix), + }, + corev1.EnvVar{ + Name: dptypes.DPCheckInterval, + Value: fmt.Sprintf("%d", checkIntervalSeconds)}, + ) + + args := fmt.Sprintf(` +set -o errexit; +set -o nounset; +while [ ! 
-f ${%[1]s} ]; do + sleep ${%[2]s} +done +backupInfo=$(cat ${%[1]s}); +echo backupInfo:${backupInfo}; +eval kubectl -n %[3]s patch backup %[4]s --subresource=status --type=merge --patch '{\"status\":${backupInfo}}'; +`, dptypes.DPBackupInfoFile, dptypes.DPCheckInterval, r.Backup.Namespace, r.Backup.Name) + + container.Args = []string{args} + podSpec.Containers = append(podSpec.Containers, *container) +} + +func (r *Request) backupActionSetExists() bool { + return r.ActionSet != nil && r.ActionSet.Spec.Backup != nil +} + +func (r *Request) targetServiceAccountName() string { + saName := r.BackupPolicy.Spec.Target.ServiceAccountName + if len(saName) > 0 { + return saName + } + // service account name is not specified, use the target pod service account + targetPod := r.TargetPods[0] + return targetPod.Spec.ServiceAccountName +} diff --git a/internal/dataprotection/backup/request_test.go b/internal/dataprotection/backup/request_test.go new file mode 100644 index 00000000000..03c19330a9b --- /dev/null +++ b/internal/dataprotection/backup/request_test.go @@ -0,0 +1,20 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package backup diff --git a/internal/dataprotection/backup/scheduler.go b/internal/dataprotection/backup/scheduler.go new file mode 100644 index 00000000000..3ac0af7f2e3 --- /dev/null +++ b/internal/dataprotection/backup/scheduler.go @@ -0,0 +1,369 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package backup + +import ( + "fmt" + "sort" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dperrors "github.com/apecloud/kubeblocks/internal/dataprotection/errors" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +type Scheduler struct { + intctrlutil.RequestCtx + 
Client client.Client + Scheme *k8sruntime.Scheme + BackupSchedule *dpv1alpha1.BackupSchedule + BackupPolicy *dpv1alpha1.BackupPolicy +} + +func (s *Scheduler) Schedule() error { + if err := s.validate(); err != nil { + return err + } + + for i := range s.BackupSchedule.Spec.Schedules { + if err := s.handleSchedulePolicy(i); err != nil { + return err + } + } + return nil +} + +// validate validates the backup schedule. +func (s *Scheduler) validate() error { + methodInBackupPolicy := func(name string) bool { + for _, method := range s.BackupPolicy.Spec.BackupMethods { + if method.Name == name { + return true + } + } + return false + } + + for _, sp := range s.BackupSchedule.Spec.Schedules { + if methodInBackupPolicy(sp.BackupMethod) { + continue + } + // backup method name is not in backup policy + return fmt.Errorf("backup method %s is not in backup policy %s/%s", + sp.BackupMethod, s.BackupPolicy.Namespace, s.BackupPolicy.Name) + } + return nil +} + +func (s *Scheduler) handleSchedulePolicy(index int) error { + schedulePolicy := &s.BackupSchedule.Spec.Schedules[index] + // TODO(ldm): better to remove this dependency in the future + if err := s.reconfigure(schedulePolicy); err != nil { + return err + } + + // create/delete/patch cronjob workload + return s.reconcileCronJob(schedulePolicy) +} + +type backupReconfigureRef struct { + Name string `json:"name"` + Key string `json:"key"` + Enable parameterPairs `json:"enable,omitempty"` + Disable parameterPairs `json:"disable,omitempty"` +} + +type parameterPairs map[string][]appsv1alpha1.ParameterPair + +func (s *Scheduler) reconfigure(schedulePolicy *dpv1alpha1.SchedulePolicy) error { + reCfgRef := s.BackupSchedule.Annotations[dptypes.ReconfigureRefAnnotationKey] + if reCfgRef == "" { + return nil + } + configRef := backupReconfigureRef{} + if err := json.Unmarshal([]byte(reCfgRef), &configRef); err != nil { + return err + } + + enable := boolptr.IsSetToTrue(schedulePolicy.Enabled) + if 
s.BackupSchedule.Annotations[constant.LastAppliedConfigAnnotationKey] == "" && !enable { + // disable in the first policy created, no need reconfigure because default configs had been set. + return nil + } + configParameters := configRef.Disable + if enable { + configParameters = configRef.Enable + } + if configParameters == nil { + return nil + } + parameters := configParameters[schedulePolicy.BackupMethod] + if len(parameters) == 0 { + // skip reconfigure if not found parameters. + return nil + } + updateParameterPairsBytes, _ := json.Marshal(parameters) + updateParameterPairs := string(updateParameterPairsBytes) + if updateParameterPairs == s.BackupSchedule.Annotations[constant.LastAppliedConfigAnnotationKey] { + // reconcile the config job if finished + return s.reconcileReconfigure() + } + + targetPodSelector := s.BackupPolicy.Spec.Target.PodSelector + ops := appsv1alpha1.OpsRequest{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: s.BackupSchedule.Name + "-", + Namespace: s.BackupSchedule.Namespace, + Labels: map[string]string{ + dptypes.DataProtectionLabelBackupScheduleKey: s.BackupSchedule.Name, + }, + }, + Spec: appsv1alpha1.OpsRequestSpec{ + Type: appsv1alpha1.ReconfiguringType, + ClusterRef: targetPodSelector.MatchLabels[constant.AppInstanceLabelKey], + Reconfigure: &appsv1alpha1.Reconfigure{ + ComponentOps: appsv1alpha1.ComponentOps{ + ComponentName: targetPodSelector.MatchLabels[constant.KBAppComponentLabelKey], + }, + Configurations: []appsv1alpha1.ConfigurationItem{ + { + Name: configRef.Name, + Keys: []appsv1alpha1.ParameterConfig{ + { + Key: configRef.Key, + Parameters: parameters, + }, + }, + }, + }, + }, + }, + } + if err := s.Client.Create(s.Ctx, &ops); err != nil { + return err + } + s.Recorder.Eventf(s.BackupSchedule, corev1.EventTypeNormal, "Reconfiguring", "update config %s", updateParameterPairs) + patch := client.MergeFrom(s.BackupSchedule.DeepCopy()) + if s.BackupSchedule.Annotations == nil { + s.BackupSchedule.Annotations = 
map[string]string{} + } + s.BackupSchedule.Annotations[constant.LastAppliedConfigAnnotationKey] = updateParameterPairs + if err := s.Client.Patch(s.Ctx, s.BackupSchedule, patch); err != nil { + return err + } + return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRequeue, "requeue to waiting for ops %s finished.", ops.Name) +} + +func (s *Scheduler) reconcileReconfigure() error { + opsList := appsv1alpha1.OpsRequestList{} + if err := s.Client.List(s.Ctx, &opsList, + client.InNamespace(s.BackupSchedule.Namespace), + client.MatchingLabels{dptypes.DataProtectionLabelBackupScheduleKey: s.BackupPolicy.Name}); err != nil { + return err + } + if len(opsList.Items) > 0 { + sort.Slice(opsList.Items, func(i, j int) bool { + return opsList.Items[j].CreationTimestamp.Before(&opsList.Items[i].CreationTimestamp) + }) + latestOps := opsList.Items[0] + if latestOps.Status.Phase == appsv1alpha1.OpsFailedPhase { + return intctrlutil.NewErrorf(dperrors.ErrorTypeReconfigureFailed, "ops failed %s", latestOps.Name) + } else if latestOps.Status.Phase != appsv1alpha1.OpsSucceedPhase { + return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRequeue, "waiting for ops %s finished.", latestOps.Name) + } + } + return nil +} + +// buildCronJob builds cronjob from backup schedule. 
+func (s *Scheduler) buildCronJob( + schedulePolicy *dpv1alpha1.SchedulePolicy, + cronJobName string) (*batchv1.CronJob, error) { + var ( + successfulJobsHistoryLimit int32 = 0 + failedJobsHistoryLimit int32 = 1 + ) + + if cronJobName == "" { + cronJobName = GenerateCRNameByBackupSchedule(s.BackupSchedule, schedulePolicy.BackupMethod) + } + + podSpec, err := s.buildPodSpec(schedulePolicy) + if err != nil { + return nil, err + } + + cronjob := &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: cronJobName, + Namespace: s.BackupSchedule.Namespace, + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + }, + }, + Spec: batchv1.CronJobSpec{ + Schedule: schedulePolicy.CronExpression, + SuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit, + FailedJobsHistoryLimit: &failedJobsHistoryLimit, + ConcurrencyPolicy: batchv1.ForbidConcurrent, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + BackoffLimit: s.BackupPolicy.Spec.BackoffLimit, + Template: corev1.PodTemplateSpec{ + Spec: *podSpec, + }, + }, + }, + }, + } + + controllerutil.AddFinalizer(cronjob, dptypes.DataProtectionFinalizerName) + // set labels + for k, v := range s.BackupSchedule.Labels { + if cronjob.Labels == nil { + cronjob.SetLabels(map[string]string{}) + } + cronjob.Labels[k] = v + } + cronjob.Labels[dptypes.DataProtectionLabelBackupScheduleKey] = s.BackupSchedule.Name + cronjob.Labels[dptypes.DataProtectionLabelBackupMethodKey] = schedulePolicy.BackupMethod + return cronjob, nil +} + +func (s *Scheduler) buildPodSpec(schedulePolicy *dpv1alpha1.SchedulePolicy) (*corev1.PodSpec, error) { + // TODO(ldm): add backup deletionPolicy + createBackupCmd := fmt.Sprintf(` +kubectl create -f - < 0 { + cronJob = &cronJobList.Items[0] + } + + // schedule is disabled, delete cronjob if exists + if schedulePolicy == nil || !boolptr.IsSetToTrue(schedulePolicy.Enabled) { + if len(cronJob.Name) != 0 { + // delete the old cronjob. 
+ if err := dputils.RemoveDataProtectionFinalizer(s.Ctx, s.Client, cronJob); err != nil { + return err + } + return s.Client.Delete(s.Ctx, cronJob) + } + // if no cron expression, return + return nil + } + + cronjobProto, err := s.buildCronJob(schedulePolicy, cronJob.Name) + if err != nil { + return err + } + + if s.BackupSchedule.Spec.StartingDeadlineMinutes != nil { + startingDeadlineSeconds := *s.BackupSchedule.Spec.StartingDeadlineMinutes * 60 + cronjobProto.Spec.StartingDeadlineSeconds = &startingDeadlineSeconds + } + + if len(cronJob.Name) == 0 { + // if no cronjob, create it. + return s.Client.Create(s.Ctx, cronjobProto) + } + + // sync the cronjob with the current backup policy configuration. + patch := client.MergeFrom(cronJob.DeepCopy()) + cronJob.Spec.StartingDeadlineSeconds = cronjobProto.Spec.StartingDeadlineSeconds + cronJob.Spec.JobTemplate.Spec.BackoffLimit = s.BackupPolicy.Spec.BackoffLimit + cronJob.Spec.JobTemplate.Spec.Template = cronjobProto.Spec.JobTemplate.Spec.Template + cronJob.Spec.Schedule = schedulePolicy.CronExpression + return s.Client.Patch(s.Ctx, cronJob, patch) +} + +func (s *Scheduler) generateBackupName() string { + target := s.BackupPolicy.Spec.Target + + // if cluster name can be found in target labels, use it as backup name prefix + backupNamePrefix := target.PodSelector.MatchLabels[constant.AppInstanceLabelKey] + + // if cluster name can not be found, use backup schedule name as backup name prefix + if backupNamePrefix == "" { + backupNamePrefix = s.BackupSchedule.Name + } + return backupNamePrefix + "-$(date -u +'%Y%m%d%H%M%S')" +} diff --git a/internal/dataprotection/backup/scheduler_test.go b/internal/dataprotection/backup/scheduler_test.go new file mode 100644 index 00000000000..03c19330a9b --- /dev/null +++ b/internal/dataprotection/backup/scheduler_test.go @@ -0,0 +1,20 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it 
and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package backup diff --git a/internal/dataprotection/backup/suite_test.go b/internal/dataprotection/backup/suite_test.go new file mode 100644 index 00000000000..455076843c8 --- /dev/null +++ b/internal/dataprotection/backup/suite_test.go @@ -0,0 +1,141 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package backup + +import ( + "context" + "go/build" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + "go.uber.org/zap/zapcore" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/testutil" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + testCtx testutil.TestContext + logger logr.Logger + recorder record.EventRecorder +) + +func init() { + viper.AutomaticEnv() +} + +func TestAction(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Data Protection Backup Suite") +} + +var _ = BeforeSuite(func() { + if viper.GetBool("ENABLE_DEBUG_LOG") { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { + o.TimeEncoder = zapcore.ISO8601TimeEncoder + })) + } + + ctx, cancel = context.WithCancel(context.TODO()) + logger = logf.FromContext(ctx).WithValues() + logger.Info("logger start") + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + // use dependent external crds. 
+ // resolved by ref: https://github.com/operator-framework/operator-sdk/issues/4434#issuecomment-786794418 + filepath.Join(build.Default.GOPATH, "pkg", "mod", "github.com", "kubernetes-csi/external-snapshotter/", + "client/v6@v6.2.0", "config", "crd"), + }, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = appsv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = vsv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = dpv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // run reconcile + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + MetricsBindAddress: "0", + ClientDisableCacheFor: ctrlutil.GetUncachedObjects(), + }) + Expect(err).ToNot(HaveOccurred()) + + testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv) + recorder = k8sManager.GetEventRecorderFor("dataprotection-backup-test") + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/dataprotection/backup/types.go b/internal/dataprotection/backup/types.go new file mode 100644 index 00000000000..412cc49ee58 --- /dev/null +++ b/internal/dataprotection/backup/types.go @@ -0,0 +1,31 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published 
by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package backup + +// FormatVersion is the backup file format version, including major, minor, and patch version. +const ( + FormatVersion = "0.1.0" + + // RepoVolumeMountPath is the backup repo volume mount path. + RepoVolumeMountPath = "/backupdata" + + // backupInfoFileName is the backup info file name in the backup path. + backupInfoFileName = "backup.info" +) diff --git a/internal/dataprotection/backup/utils.go b/internal/dataprotection/backup/utils.go new file mode 100644 index 00000000000..95cf776bd90 --- /dev/null +++ b/internal/dataprotection/backup/utils.go @@ -0,0 +1,235 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package backup + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/dataprotection/action" + "github.com/apecloud/kubeblocks/internal/dataprotection/types" +) + +// GetBackupPolicy returns the BackupPolicy with the given namespace and name. +func GetBackupPolicy(ctx context.Context, cli client.Client, namespace, name string) (*dpv1alpha1.BackupPolicy, error) { + backupPolicy := &dpv1alpha1.BackupPolicy{} + if err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, backupPolicy); err != nil { + return nil, err + } + return backupPolicy, nil +} + +func GetActionSet(ctx context.Context, cli client.Client, namespace, name string) (*dpv1alpha1.ActionSet, error) { + actionSet := &dpv1alpha1.ActionSet{} + if err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, actionSet); err != nil { + return nil, err + } + return actionSet, nil +} + +func getVolumesByNames(pod *corev1.Pod, volumeNames []string) []corev1.Volume { + var volumes []corev1.Volume + for _, v := range pod.Spec.Volumes { + for _, name := range volumeNames { + if v.Name == name { + volumes = append(volumes, v) + } + } + } + return volumes +} + +func getVolumesByMounts(pod *corev1.Pod, mounts []corev1.VolumeMount) []corev1.Volume { + var volumes []corev1.Volume + for _, v := range pod.Spec.Volumes { + for _, m := range mounts { + if v.Name == m.Name { + volumes = append(volumes, v) + } + } + } + return volumes +} + +// TODO: if the result is empty, should we return the pod's volumes? +// +// if volumes can not found in the pod spec, maybe output a warning log? 
+func getVolumesByVolumeInfo(pod *corev1.Pod, volumeInfo *dpv1alpha1.TargetVolumeInfo) []corev1.Volume { + if volumeInfo == nil { + return nil + } + var volumes []corev1.Volume + if len(volumeInfo.Volumes) > 0 { + volumes = getVolumesByNames(pod, volumeInfo.Volumes) + } else if len(volumeInfo.VolumeMounts) > 0 { + volumes = getVolumesByMounts(pod, volumeInfo.VolumeMounts) + } + return volumes +} + +func getVolumeMountsByVolumeInfo(pod *corev1.Pod, info *dpv1alpha1.TargetVolumeInfo) []corev1.VolumeMount { + if info == nil || len(info.VolumeMounts) == 0 { + return nil + } + var mounts []corev1.VolumeMount + for _, v := range pod.Spec.Volumes { + for _, m := range info.VolumeMounts { + if v.Name == m.Name { + mounts = append(mounts, m) + } + } + } + return mounts +} + +func getPVCsByVolumeNames(cli client.Client, + pod *corev1.Pod, + volumeNames []string) ([]action.PersistentVolumeClaimWrapper, error) { + if len(volumeNames) == 0 { + return nil, nil + } + var all []action.PersistentVolumeClaimWrapper + for _, v := range pod.Spec.Volumes { + if v.PersistentVolumeClaim == nil { + continue + } + for _, name := range volumeNames { + if v.Name != name { + continue + } + // get the PVC from pod's volumes + tmp := corev1.PersistentVolumeClaim{} + pvcKey := client.ObjectKey{Namespace: pod.Namespace, Name: v.PersistentVolumeClaim.ClaimName} + if err := cli.Get(context.Background(), pvcKey, &tmp); err != nil { + return nil, err + } + + all = append(all, action.NewPersistentVolumeClaimWrapper(*tmp.DeepCopy(), name)) + } + } + return all, nil +} + +func GenerateBackupRepoVolumeName(pvcName string) string { + return fmt.Sprintf("dp-backup-%s", pvcName) +} + +func buildBackupRepoVolume(pvcName string) corev1.Volume { + return corev1.Volume{ + Name: GenerateBackupRepoVolumeName(pvcName), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + } +} + +func buildBackupRepoVolumeMount(pvcName string) 
corev1.VolumeMount { + return corev1.VolumeMount{ + Name: GenerateBackupRepoVolumeName(pvcName), + MountPath: RepoVolumeMountPath, + } +} + +func excludeLabelsForWorkload() []string { + return []string{constant.KBAppComponentLabelKey} +} + +// BuildBackupWorkloadLabels builds the labels for workload which owned by backup. +func BuildBackupWorkloadLabels(backup *dpv1alpha1.Backup) map[string]string { + labels := backup.Labels + if labels == nil { + labels = map[string]string{} + } else { + for _, v := range excludeLabelsForWorkload() { + delete(labels, v) + } + } + labels[types.DataProtectionLabelBackupNameKey] = backup.Name + return labels +} + +func buildBackupJobObjMeta(backup *dpv1alpha1.Backup, prefix string) *metav1.ObjectMeta { + return &metav1.ObjectMeta{ + Name: GenerateBackupJobName(backup, prefix), + Namespace: backup.Namespace, + Labels: BuildBackupWorkloadLabels(backup), + } +} + +func GenerateBackupJobName(backup *dpv1alpha1.Backup, prefix string) string { + name := fmt.Sprintf("%s-%s-%s", prefix, backup.Name, backup.UID[:8]) + // job name cannot exceed 63 characters for label name limit. + if len(name) > 63 { + return name[:63] + } + return name +} + +// GenerateCRNameByBackupSchedule generate a CR name which is created by BackupSchedule, such as CronJob Backup. 
+func GenerateCRNameByBackupSchedule(backupSchedule *dpv1alpha1.BackupSchedule, method string) string { + name := fmt.Sprintf("%s-%s", generateUniqueNameWithBackupSchedule(backupSchedule), backupSchedule.Namespace) + if len(name) > 30 { + name = strings.TrimRight(name[:30], "-") + } + return fmt.Sprintf("%s-%s", name, method) +} + +func generateUniqueNameWithBackupSchedule(backupSchedule *dpv1alpha1.BackupSchedule) string { + uniqueName := backupSchedule.Name + if len(backupSchedule.OwnerReferences) > 0 { + uniqueName = fmt.Sprintf("%s-%s", backupSchedule.OwnerReferences[0].UID[:8], backupSchedule.OwnerReferences[0].Name) + } + return uniqueName +} + +func buildBackupInfoFilePath(backup *dpv1alpha1.Backup, pathPrefix string) string { + return buildBackupPathInContainer(backup, pathPrefix) + "/" + backupInfoFileName +} + +func buildBackupPathInContainer(backup *dpv1alpha1.Backup, pathPrefix string) string { + return RepoVolumeMountPath + BuildBackupPath(backup, pathPrefix) +} + +// BuildBackupPath builds the path to storage backup datas in backup repository. 
+func BuildBackupPath(backup *dpv1alpha1.Backup, pathPrefix string) string { + pathPrefix = strings.TrimRight(pathPrefix, "/") + if strings.TrimSpace(pathPrefix) == "" || strings.HasPrefix(pathPrefix, "/") { + return fmt.Sprintf("/%s%s/%s", backup.Namespace, pathPrefix, backup.Name) + } + return fmt.Sprintf("/%s/%s/%s", backup.Namespace, pathPrefix, backup.Name) +} + +func GetSchedulePolicyByMethod(backupSchedule *dpv1alpha1.BackupSchedule, method string) *dpv1alpha1.SchedulePolicy { + for _, s := range backupSchedule.Spec.Schedules { + if s.BackupMethod == method { + return &s + } + } + return nil +} diff --git a/internal/dataprotection/builder/builder.go b/internal/dataprotection/builder/builder.go new file mode 100644 index 00000000000..cbbe980f497 --- /dev/null +++ b/internal/dataprotection/builder/builder.go @@ -0,0 +1,41 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/apecloud/kubeblocks/internal/constant" +) + +func BuildVolumeSnapshotClass(name string, driver string) *vsv1.VolumeSnapshotClass { + vsc := &vsv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + constant.KBManagedByKey: constant.AppName, + }, + }, + Driver: driver, + DeletionPolicy: vsv1.VolumeSnapshotContentDelete, + } + return vsc +} diff --git a/internal/dataprotection/builder/builder_test.go b/internal/dataprotection/builder/builder_test.go new file mode 100644 index 00000000000..b10522e9964 --- /dev/null +++ b/internal/dataprotection/builder/builder_test.go @@ -0,0 +1,36 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("builder", func() { + It("builds volume snapshot class correctly", func() { + className := "vsc-test" + driverName := "csi-driver-test" + obj := BuildVolumeSnapshotClass(className, driverName) + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Name).Should(Equal(className)) + Expect(obj.Driver).Should(Equal(driverName)) + }) +}) diff --git a/internal/dataprotection/builder/suite_test.go b/internal/dataprotection/builder/suite_test.go new file mode 100644 index 00000000000..7cb19699de3 --- /dev/null +++ b/internal/dataprotection/builder/suite_test.go @@ -0,0 +1,47 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Data Protection Builder Suite") +} + +var _ = BeforeSuite(func() { + // +kubebuilder:scaffold:scheme + + go func() { + defer GinkgoRecover() + }() +}) + +var _ = AfterSuite(func() { +}) diff --git a/internal/dataprotection/errors/errors.go b/internal/dataprotection/errors/errors.go new file mode 100644 index 00000000000..207f374bd32 --- /dev/null +++ b/internal/dataprotection/errors/errors.go @@ -0,0 +1,85 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package errors + +import ( + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" +) + +// ErrorType for backup +const ( + // ErrorTypeBackupNotSupported this backup type not supported + ErrorTypeBackupNotSupported intctrlutil.ErrorType = "BackupNotSupported" + // ErrorTypeBackupPVTemplateNotFound this pv template not found + ErrorTypeBackupPVTemplateNotFound intctrlutil.ErrorType = "BackupPVTemplateNotFound" + // ErrorTypeBackupNotCompleted report backup not completed. 
+	ErrorTypeBackupNotCompleted intctrlutil.ErrorType = "BackupNotCompleted"
+	// ErrorTypeBackupPVCNameIsEmpty pvc name for backup is empty
+	ErrorTypeBackupPVCNameIsEmpty intctrlutil.ErrorType = "BackupPVCNameIsEmpty"
+	// ErrorTypeBackupJobFailed backup job failed
+	ErrorTypeBackupJobFailed intctrlutil.ErrorType = "BackupJobFailed"
+	// ErrorTypeStorageNotMatch storage not match
+	ErrorTypeStorageNotMatch intctrlutil.ErrorType = "ErrorTypeStorageNotMatch"
+	// ErrorTypeReconfigureFailed reconfigure failed
+	ErrorTypeReconfigureFailed intctrlutil.ErrorType = "ErrorTypeReconfigureFailed"
+	// ErrorTypeInvalidLogfileBackupName invalid logfile backup name
+	ErrorTypeInvalidLogfileBackupName intctrlutil.ErrorType = "InvalidLogfileBackupName"
+	// ErrorTypeBackupScheduleDisabled backup schedule disabled
+	ErrorTypeBackupScheduleDisabled intctrlutil.ErrorType = "BackupScheduleDisabled"
+	// ErrorTypeLogfileScheduleDisabled logfile schedule disabled
+	ErrorTypeLogfileScheduleDisabled intctrlutil.ErrorType = "LogfileScheduleDisabled"
+	// ErrorTypeWaitForExternalHandler wait for external handler to handle the Backup or Restore
+	ErrorTypeWaitForExternalHandler intctrlutil.ErrorType = "WaitForExternalHandler"
+)
+
+// NewBackupNotSupported returns a new Error with ErrorTypeBackupNotSupported.
+func NewBackupNotSupported(backupType, backupPolicyName string) *intctrlutil.Error {
+	return intctrlutil.NewErrorf(ErrorTypeBackupNotSupported, `backup type "%s" not supported by backup policy "%s"`, backupType, backupPolicyName)
+}
+
+// NewBackupPVTemplateNotFound returns a new Error with ErrorTypeBackupPVTemplateNotFound.
+func NewBackupPVTemplateNotFound(cmName, cmNamespace string) *intctrlutil.Error {
+	return intctrlutil.NewErrorf(ErrorTypeBackupPVTemplateNotFound, `the persistentVolume template is empty in the configMap %s/%s`, cmNamespace, cmName)
+}
+
+// NewBackupPVCNameIsEmpty returns a new Error with ErrorTypeBackupPVCNameIsEmpty.
+func NewBackupPVCNameIsEmpty(backupRepo, backupPolicyName string) *intctrlutil.Error { + return intctrlutil.NewErrorf(ErrorTypeBackupPVCNameIsEmpty, `the persistentVolumeClaim name of %s is empty in BackupPolicy "%s"`, backupRepo, backupPolicyName) +} + +// NewBackupJobFailed returns a new Error with ErrorTypeBackupJobFailed. +func NewBackupJobFailed(jobName string) *intctrlutil.Error { + return intctrlutil.NewErrorf(ErrorTypeBackupJobFailed, `backup job "%s" failed`, jobName) +} + +// NewInvalidLogfileBackupName returns a new Error with ErrorTypeInvalidLogfileBackupName. +func NewInvalidLogfileBackupName(backupPolicyName string) *intctrlutil.Error { + return intctrlutil.NewErrorf(ErrorTypeInvalidLogfileBackupName, `backup name is incorrect for logfile, you can create the logfile backup by enabling the schedule in BackupPolicy "%s"`, backupPolicyName) +} + +// NewBackupScheduleDisabled returns a new Error with ErrorTypeBackupScheduleDisabled. +func NewBackupScheduleDisabled(backupType, backupPolicyName string) *intctrlutil.Error { + return intctrlutil.NewErrorf(ErrorTypeBackupScheduleDisabled, `%s schedule is disabled, you can enable spec.schedule.%s in BackupPolicy "%s"`, backupType, backupType, backupPolicyName) +} + +// NewBackupLogfileScheduleDisabled returns a new Error with ErrorTypeLogfileScheduleDisabled. +func NewBackupLogfileScheduleDisabled(backupToolName string) *intctrlutil.Error { + return intctrlutil.NewErrorf(ErrorTypeLogfileScheduleDisabled, `BackupTool "%s" of the backup relies on logfile. 
Please enable the logfile scheduling firstly`, backupToolName) +} diff --git a/internal/dataprotection/errors/errors_test.go b/internal/dataprotection/errors/errors_test.go new file mode 100644 index 00000000000..fde1dcf9335 --- /dev/null +++ b/internal/dataprotection/errors/errors_test.go @@ -0,0 +1,229 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package errors + +import ( + "fmt" + "reflect" + "testing" + + "github.com/pkg/errors" + + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" +) + +func TestNerError(t *testing.T) { + err1 := intctrlutil.NewError(ErrorTypeBackupNotCompleted, "test c2") + if err1.Error() != "test c2" { + t.Error("NewErrorf failed") + } +} + +func TestNerErrorf(t *testing.T) { + err1 := intctrlutil.NewErrorf(ErrorTypeBackupNotCompleted, "test %s %s", "c1", "c2") + if err1.Error() != "test c1 c2" { + t.Error("NewErrorf failed") + } + testError := fmt.Errorf("test: %w", err1) + if !errors.Is(testError, err1) { + t.Error("errors.Is failed") + } + + var target *intctrlutil.Error + if !errors.As(testError, &target) { + t.Error("errors.As failed") + } +} + +func TestNewErrors(t *testing.T) { + backupNotSupported := NewBackupNotSupported("datafile", "policy-test") + if !intctrlutil.IsTargetError(backupNotSupported, ErrorTypeBackupNotSupported) { + t.Error("should be error of BackupNotSupported") + } + pvTemplateNotFound := NewBackupPVTemplateNotFound("configName", "default") + if !intctrlutil.IsTargetError(pvTemplateNotFound, ErrorTypeBackupPVTemplateNotFound) { + t.Error("should be error of BackupPVTemplateNotFound") + } + pvsIsEmpty := NewBackupPVCNameIsEmpty("datafile", "policy-test1") + if !intctrlutil.IsTargetError(pvsIsEmpty, ErrorTypeBackupPVCNameIsEmpty) { + t.Error("should be error of BackupPVCNameIsEmpty") + } + jobFailed := NewBackupJobFailed("jobName") + if !intctrlutil.IsTargetError(jobFailed, ErrorTypeBackupJobFailed) { + t.Error("should be error of BackupJobFailed") + } +} + +func TestUnwrapControllerError(t *testing.T) { + backupNotSupported := NewBackupNotSupported("datafile", "policy-test") + newErr := intctrlutil.UnwrapControllerError(backupNotSupported) + if newErr == nil { + t.Error("should unwrap a controller error, but got nil") + } + err := errors.New("test error") + newErr = intctrlutil.UnwrapControllerError(err) + if newErr != nil { + 
t.Errorf("should not unwrap a controller error, but got: %v", newErr) + } +} + +func TestNewBackupJobFailed(t *testing.T) { + type args struct { + jobName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupJobFailed(tt.args.jobName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupJobFailed() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewBackupLogfileScheduleDisabled(t *testing.T) { + type args struct { + backupToolName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupLogfileScheduleDisabled(tt.args.backupToolName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupLogfileScheduleDisabled() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewBackupNotSupported(t *testing.T) { + type args struct { + backupType string + backupPolicyName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupNotSupported(tt.args.backupType, tt.args.backupPolicyName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupNotSupported() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewBackupPVCNameIsEmpty(t *testing.T) { + type args struct { + backupRepo string + backupPolicyName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupPVCNameIsEmpty(tt.args.backupRepo, tt.args.backupPolicyName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupPVCNameIsEmpty() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewBackupPVTemplateNotFound(t *testing.T) { + type args struct { + cmName string + cmNamespace string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupPVTemplateNotFound(tt.args.cmName, tt.args.cmNamespace); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupPVTemplateNotFound() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewBackupScheduleDisabled(t *testing.T) { + type args struct { + backupType string + backupPolicyName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewBackupScheduleDisabled(tt.args.backupType, tt.args.backupPolicyName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewBackupScheduleDisabled() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewInvalidLogfileBackupName(t *testing.T) { + type args struct { + backupPolicyName string + } + tests := []struct { + name string + args args + want *intctrlutil.Error + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewInvalidLogfileBackupName(tt.args.backupPolicyName); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewInvalidLogfileBackupName() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/dataprotection/restore/builder.go b/internal/dataprotection/restore/builder.go new file mode 100644 index 00000000000..128cbd28274 --- /dev/null +++ b/internal/dataprotection/restore/builder.go @@ -0,0 +1,294 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package restore + +import ( + "fmt" + "strconv" + "strings" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" +) + +type restoreJobBuilder struct { + restore *dpv1alpha1.Restore + stage dpv1alpha1.RestoreStage + backupSet BackupActionSet + env []corev1.EnvVar + commonVolumes []corev1.Volume + commonVolumeMounts []corev1.VolumeMount + // specificVolumes should be rebuilt for each job. + specificVolumes []corev1.Volume + // specificVolumeMounts should be rebuilt for each job. + specificVolumeMounts []corev1.VolumeMount + image string + command []string + tolerations []corev1.Toleration + nodeSelector map[string]string +} + +func newRestoreJobBuilder(restore *dpv1alpha1.Restore, backupSet BackupActionSet, stage dpv1alpha1.RestoreStage) *restoreJobBuilder { + return &restoreJobBuilder{ + restore: restore, + backupSet: backupSet, + stage: stage, + commonVolumes: []corev1.Volume{}, + commonVolumeMounts: []corev1.VolumeMount{}, + } +} + +func (r *restoreJobBuilder) buildPVCVolumeAndMount( + claim dpv1alpha1.RestoreVolumeClaim, + identifier string) (*corev1.Volume, *corev1.VolumeMount, error) { + volumeName := fmt.Sprintf("%s-%s", identifier, claim.Name) + volume := &corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: claim.Name}}, + } + volumeMount := &corev1.VolumeMount{Name: volumeName} + if claim.MountPath != "" { + volumeMount.MountPath = claim.MountPath + return volume, volumeMount, nil + } + mountPath := getMountPathWithSourceVolume(r.backupSet.Backup, 
claim.VolumeSource) + if mountPath != "" { + volumeMount.MountPath = mountPath + return volume, volumeMount, nil + } + + if r.backupSet.UseVolumeSnapshot && !r.backupSet.ActionSet.HasPrepareDataStage() { + return nil, nil, nil + } + return nil, nil, intctrlutil.NewFatalError(fmt.Sprintf(`unable to find the mountPath corresponding to volumeSource "%s" from status.backupMethod.targetVolumes.volumeMounts of backup "%s"`, + claim.VolumeSource, r.backupSet.Backup.Name)) +} + +// addBackupVolumeAndMount adds the volume and volumeMount of backup pvc to common volumes and volumeMounts slice. +func (r *restoreJobBuilder) addBackupVolumeAndMount() *restoreJobBuilder { + if r.backupSet.Backup.Status.PersistentVolumeClaimName != "" { + backupName := r.backupSet.Backup.Name + r.commonVolumes = append(r.commonVolumes, corev1.Volume{ + Name: backupName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: r.backupSet.Backup.Status.PersistentVolumeClaimName}, + }, + }) + r.commonVolumeMounts = append(r.commonVolumeMounts, corev1.VolumeMount{ + Name: backupName, + MountPath: "/" + backupName, + }) + } + return r +} + +// addToCommonVolumesAndMounts adds the volume and volumeMount to common volumes and volumeMounts slice. +func (r *restoreJobBuilder) addToCommonVolumesAndMounts(volume *corev1.Volume, volumeMount *corev1.VolumeMount) *restoreJobBuilder { + if volume != nil { + r.commonVolumes = append(r.commonVolumes, *volume) + } + if volumeMount != nil { + r.commonVolumeMounts = append(r.commonVolumeMounts, *volumeMount) + } + return r +} + +// resetSpecificVolumesAndMounts resets the specific volumes and volumeMounts slice. +func (r *restoreJobBuilder) resetSpecificVolumesAndMounts() { + r.specificVolumes = []corev1.Volume{} + r.specificVolumeMounts = []corev1.VolumeMount{} +} + +// addToSpecificVolumesAndMounts adds the volume and volumeMount to specific volumes and volumeMounts slice. 
+func (r *restoreJobBuilder) addToSpecificVolumesAndMounts(volume *corev1.Volume, volumeMount *corev1.VolumeMount) *restoreJobBuilder { + if volume != nil { + r.specificVolumes = append(r.specificVolumes, *volume) + } + if volumeMount != nil { + r.specificVolumeMounts = append(r.specificVolumeMounts, *volumeMount) + } + return r +} + +func (r *restoreJobBuilder) setImage(image string) *restoreJobBuilder { + r.image = image + return r +} + +func (r *restoreJobBuilder) setCommand(command []string) *restoreJobBuilder { + r.command = command + return r +} + +func (r *restoreJobBuilder) setToleration(tolerations []corev1.Toleration) *restoreJobBuilder { + r.tolerations = tolerations + return r +} + +func (r *restoreJobBuilder) setNodeNameToNodeSelector(nodeName string) *restoreJobBuilder { + r.nodeSelector = map[string]string{ + corev1.LabelHostname: nodeName, + } + return r +} + +// addCommonEnv adds the common envs for each restore job. +func (r *restoreJobBuilder) addCommonEnv() *restoreJobBuilder { + backupName := r.backupSet.Backup.Name + // add backupName env + r.env = []corev1.EnvVar{{Name: dptypes.DPBackupName, Value: backupName}} + // add mount path env of backup dir + filePath := r.backupSet.Backup.Status.Path + if filePath != "" { + r.env = append(r.env, corev1.EnvVar{Name: dptypes.DPBackupDIR, Value: fmt.Sprintf("/%s%s", backupName, filePath)}) + // TODO: add continuous file path env + } + // add time env + actionSetEnv := r.backupSet.ActionSet.Spec.Env + timeFormat := getTimeFormat(r.backupSet.ActionSet.Spec.Env) + appendTimeEnv := func(envName, envTimestampName string, targetTime *metav1.Time) { + if targetTime.IsZero() { + return + } + if envName != "" { + r.env = append(r.env, corev1.EnvVar{Name: envName, Value: targetTime.UTC().Format(timeFormat)}) + } + if envTimestampName != "" { + r.env = append(r.env, corev1.EnvVar{Name: envTimestampName, Value: strconv.FormatInt(targetTime.Unix(), 10)}) + } + } + appendTimeEnv(dptypes.DPBackupStopTime, "", 
r.backupSet.Backup.GetEndTime()) + if r.restore.Spec.RestoreTime != "" { + restoreTime, _ := time.Parse(time.RFC3339, r.restore.Spec.RestoreTime) + appendTimeEnv(DPRestoreTime, DPRestoreTimestamp, &metav1.Time{Time: restoreTime}) + } + // append actionSet env + r.env = append(r.env, actionSetEnv...) + backupMethod := r.backupSet.Backup.Status.BackupMethod + if backupMethod != nil && len(backupMethod.Env) > 0 { + r.env = utils.MergeEnv(r.env, backupMethod.Env) + } + // merge the restore env + r.env = utils.MergeEnv(r.env, r.restore.Spec.Env) + return r +} + +func (r *restoreJobBuilder) addTargetPodAndCredentialEnv(pod *corev1.Pod, connectCredential *dpv1alpha1.ConnectCredential) *restoreJobBuilder { + if pod == nil { + return r + } + var env []corev1.EnvVar + // Note: now only add the first container envs. + if len(pod.Spec.Containers) != 0 { + env = pod.Spec.Containers[0].Env + } + env = append(env, corev1.EnvVar{Name: dptypes.DPDBHost, Value: intctrlutil.BuildPodHostDNS(pod)}) + if connectCredential != nil { + appendEnvFromSecret := func(envName, keyName string) { + if keyName != "" { + env = append(env, corev1.EnvVar{Name: envName, ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: connectCredential.SecretName, + }, + Key: keyName, + }, + }}) + } + } + appendEnvFromSecret(dptypes.DPDBUser, connectCredential.UsernameKey) + appendEnvFromSecret(dptypes.DPDBPassword, connectCredential.PasswordKey) + appendEnvFromSecret(dptypes.DPDBPort, connectCredential.PortKey) + if connectCredential.HostKey != "" { + appendEnvFromSecret(dptypes.DPDBHost, connectCredential.HostKey) + } + } + r.env = utils.MergeEnv(r.env, env) + return r +} + +// builderRestoreJobName builds restore job name. 
+func (r *restoreJobBuilder) builderRestoreJobName(jobIndex int) string { + jobName := fmt.Sprintf("restore-%s-%s-%s-%d", strings.ToLower(string(r.stage)), r.restore.UID[:8], r.backupSet.Backup.Name, jobIndex) + l := len(jobName) + if l > 63 { + return fmt.Sprintf("%s-%s", jobName[:57], jobName[l-5:l]) + } + return jobName +} + +// build the restore job by this builder. +func (r *restoreJobBuilder) build(jobIndex int) *batchv1.Job { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.builderRestoreJobName(jobIndex), + Namespace: r.restore.Namespace, + Labels: BuildRestoreLabels(r.restore.Name), + }, + } + podSpec := job.Spec.Template.Spec + // 1. set pod spec + runUser := int64(0) + podSpec.SecurityContext = &corev1.PodSecurityContext{ + RunAsUser: &runUser, + } + podSpec.RestartPolicy = corev1.RestartPolicyOnFailure + if r.stage == dpv1alpha1.PrepareData { + // set scheduling spec + schedulingSpec := r.restore.Spec.PrepareDataConfig.SchedulingSpec + podSpec.Tolerations = schedulingSpec.Tolerations + podSpec.Affinity = schedulingSpec.Affinity + podSpec.NodeSelector = schedulingSpec.NodeSelector + podSpec.NodeName = schedulingSpec.NodeName + podSpec.SchedulerName = schedulingSpec.SchedulerName + podSpec.TopologySpreadConstraints = schedulingSpec.TopologySpreadConstraints + } else { + podSpec.Tolerations = r.tolerations + podSpec.NodeSelector = r.nodeSelector + } + r.specificVolumes = append(r.specificVolumes, r.commonVolumes...) + podSpec.Volumes = r.specificVolumes + job.Spec.Template.Spec = podSpec + job.Spec.BackoffLimit = &defaultBackoffLimit + + // 2. set restore container + r.specificVolumeMounts = append(r.specificVolumeMounts, r.commonVolumeMounts...) 
+ container := corev1.Container{ + Name: Restore, + Resources: r.restore.Spec.ContainerResources, + Env: r.env, + VolumeMounts: r.specificVolumeMounts, + Command: r.command, + Image: r.image, + ImagePullPolicy: corev1.PullIfNotPresent, + } + intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) + job.Spec.Template.Spec.Containers = []corev1.Container{container} + controllerutil.AddFinalizer(job, dptypes.DataProtectionFinalizerName) + return job +} diff --git a/internal/dataprotection/restore/manager.go b/internal/dataprotection/restore/manager.go new file mode 100644 index 00000000000..cf0ae1dc768 --- /dev/null +++ b/internal/dataprotection/restore/manager.go @@ -0,0 +1,513 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package restore + +import ( + "fmt" + "sort" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" +) + +type BackupActionSet struct { + Backup *dpv1alpha1.Backup + ActionSet *dpv1alpha1.ActionSet + UseVolumeSnapshot bool +} + +type RestoreManager struct { + OriginalRestore *dpv1alpha1.Restore + Restore *dpv1alpha1.Restore + PrepareDataBackupSets []BackupActionSet + PostReadyBackupSets []BackupActionSet + Schema *runtime.Scheme + Recorder record.EventRecorder +} + +func NewRestoreManager(restore *dpv1alpha1.Restore, recorder record.EventRecorder, schema *runtime.Scheme) *RestoreManager { + return &RestoreManager{ + OriginalRestore: restore.DeepCopy(), + Restore: restore, + PrepareDataBackupSets: []BackupActionSet{}, + PostReadyBackupSets: []BackupActionSet{}, + Schema: schema, + Recorder: recorder, + } +} + +// GetBackupActionSetByNamespaced gets the BackupActionSet by name and namespace of backup. 
+func (r *RestoreManager) GetBackupActionSetByNamespaced(reqCtx intctrlutil.RequestCtx, + cli client.Client, + backupName, + namespace string) (*BackupActionSet, error) { + backup := &dpv1alpha1.Backup{} + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: namespace, Name: backupName}, backup); err != nil { + if apierrors.IsNotFound(err) { + err = intctrlutil.NewFatalError(err.Error()) + } + return nil, err + } + backupMethod := backup.Status.BackupMethod + if backupMethod == nil { + return nil, intctrlutil.NewFatalError(fmt.Sprintf(`status.backupMethod of backup "%s" is empty`, backupName)) + } + useVolumeSnapshot := backupMethod.SnapshotVolumes != nil && *backupMethod.SnapshotVolumes + actionSet, err := utils.GetActionSetByName(reqCtx, cli, backup.Status.BackupMethod.ActionSetName) + if err != nil { + return nil, err + } + return &BackupActionSet{Backup: backup, ActionSet: actionSet, UseVolumeSnapshot: useVolumeSnapshot}, nil +} + +// BuildDifferentialBackupActionSets builds the backupActionSets for specified incremental backup. +func (r *RestoreManager) BuildDifferentialBackupActionSets(reqCtx intctrlutil.RequestCtx, cli client.Client, sourceBackupSet BackupActionSet) error { + parentBackupSet, err := r.GetBackupActionSetByNamespaced(reqCtx, cli, sourceBackupSet.Backup.Spec.ParentBackupName, sourceBackupSet.Backup.Namespace) + if err != nil || parentBackupSet == nil { + return err + } + r.SetBackupSets(*parentBackupSet, sourceBackupSet) + return nil +} + +// BuildIncrementalBackupActionSets builds the backupActionSets for specified incremental backup. +func (r *RestoreManager) BuildIncrementalBackupActionSets(reqCtx intctrlutil.RequestCtx, cli client.Client, sourceBackupSet BackupActionSet) error { + r.SetBackupSets(sourceBackupSet) + if sourceBackupSet.ActionSet != nil && sourceBackupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental { + // get the parent BackupActionSet for incremental. 
+ backupSet, err := r.GetBackupActionSetByNamespaced(reqCtx, cli, sourceBackupSet.Backup.Spec.ParentBackupName, sourceBackupSet.Backup.Namespace) + if err != nil || backupSet == nil { + return err + } + return r.BuildIncrementalBackupActionSets(reqCtx, cli, *backupSet) + } + // if reaches full backup, sort the BackupActionSets and return + sortBackupSets := func(backupSets []BackupActionSet, reverse bool) []BackupActionSet { + sort.Slice(backupSets, func(i, j int) bool { + if reverse { + i, j = j, i + } + backupI := backupSets[i].Backup + backupJ := backupSets[j].Backup + if backupI == nil { + return false + } + if backupJ == nil { + return true + } + return compareWithBackupStopTime(*backupI, *backupJ) + }) + return backupSets + } + r.PrepareDataBackupSets = sortBackupSets(r.PrepareDataBackupSets, false) + r.PostReadyBackupSets = sortBackupSets(r.PostReadyBackupSets, false) + return nil +} + +func (r *RestoreManager) SetBackupSets(backupSets ...BackupActionSet) { + for i := range backupSets { + if backupSets[i].UseVolumeSnapshot { + r.PrepareDataBackupSets = append(r.PrepareDataBackupSets, backupSets[i]) + continue + } + if backupSets[i].ActionSet == nil || backupSets[i].ActionSet.Spec.Restore == nil { + continue + } + if backupSets[i].ActionSet.Spec.Restore.PrepareData != nil { + r.PrepareDataBackupSets = append(r.PrepareDataBackupSets, backupSets[i]) + } + + if len(backupSets[i].ActionSet.Spec.Restore.PostReady) > 0 { + r.PostReadyBackupSets = append(r.PostReadyBackupSets, backupSets[i]) + } + } +} + +// AnalysisRestoreActionsWithBackup analysis the restore actions progress group by backup. +// check if the restore jobs are completed or failed or processing. 
+func (r *RestoreManager) AnalysisRestoreActionsWithBackup(stage dpv1alpha1.RestoreStage, backupName string, actionName string) (bool, bool) { + var ( + restoreActionCount int + finishedActionCount int + exitFailedAction bool + ) + restoreActions := r.Restore.Status.Actions.PostReady + if stage == dpv1alpha1.PrepareData { + restoreActions = r.Restore.Status.Actions.PrepareData + // if the stage is prepareData, actionCount keeps up with pvc count. + restoreActionCount = GetRestoreActionsCountForPrepareData(r.Restore.Spec.PrepareDataConfig) + } + for i := range restoreActions { + if restoreActions[i].BackupName != backupName && restoreActions[i].Name != actionName { + continue + } + // if the stage is PostReady, actionCount keeps up with actions + if stage == dpv1alpha1.PostReady { + restoreActionCount += 1 + } + switch restoreActions[i].Status { + case dpv1alpha1.RestoreActionFailed: + finishedActionCount += 1 + exitFailedAction = true + case dpv1alpha1.RestoreActionCompleted: + finishedActionCount += 1 + } + } + + allActionsFinished := restoreActionCount > 0 && finishedActionCount == restoreActionCount + return allActionsFinished, exitFailedAction +} + +func (r *RestoreManager) RestorePVCFromSnapshot(reqCtx intctrlutil.RequestCtx, cli client.Client, backupSet BackupActionSet, actionName string) error { + prepareDataConfig := r.Restore.Spec.PrepareDataConfig + if prepareDataConfig == nil { + return nil + } + createPVCWithSnapshot := func(claim dpv1alpha1.RestoreVolumeClaim) error { + if claim.VolumeSource == "" { + return intctrlutil.NewFatalError(fmt.Sprintf(`claim "%s"" volumeSource can not be empty if the backup uses volume snapshot`, claim.Name)) + } + // get volumeSnapshot by backup and volumeSource. 
+ claim.VolumeClaimSpec.DataSource = &corev1.TypedLocalObjectReference{ + Name: utils.GetBackupVolumeSnapshotName(backupSet.Backup.Name, claim.VolumeSource), + Kind: constant.VolumeSnapshotKind, + APIGroup: &volumeSnapshotGroup, + } + return r.createPVCIfNotExist(reqCtx, cli, claim.ObjectMeta, claim.VolumeClaimSpec) + } + + for _, claim := range prepareDataConfig.RestoreVolumeClaims { + if err := createPVCWithSnapshot(claim); err != nil { + return err + } + } + claimTemplate := prepareDataConfig.RestoreVolumeClaimsTemplate + + if claimTemplate != nil { + restoreJobReplicas := GetRestoreActionsCountForPrepareData(prepareDataConfig) + for i := 0; i < restoreJobReplicas; i++ { + // create pvc from claims template, build volumes and volumeMounts + for _, claim := range prepareDataConfig.RestoreVolumeClaimsTemplate.Templates { + claim.Name = fmt.Sprintf("%s-%d", claim.Name, i+int(claimTemplate.StartingIndex)) + if err := createPVCWithSnapshot(claim); err != nil { + return err + } + } + } + } + // NOTE: do not to record status action for restoring from snapshot. it is not defined in ActionSet. + return nil +} + +// BuildPrepareDataJobs builds the restore jobs for prepare pvc's data, and will create the target pvcs if not exist. +func (r *RestoreManager) BuildPrepareDataJobs(reqCtx intctrlutil.RequestCtx, cli client.Client, backupSet BackupActionSet, actionName string) ([]*batchv1.Job, error) { + prepareDataConfig := r.Restore.Spec.PrepareDataConfig + if prepareDataConfig == nil { + return nil, nil + } + if !backupSet.ActionSet.HasPrepareDataStage() { + return nil, nil + } + jobBuilder := newRestoreJobBuilder(r.Restore, backupSet, dpv1alpha1.PrepareData). + setImage(backupSet.ActionSet.Spec.Restore.PrepareData.Image). + setCommand(backupSet.ActionSet.Spec.Restore.PrepareData.Command). + addBackupVolumeAndMount(). 
+ addCommonEnv() + + createPVCIfNoteExistsAndBuildVolume := func(claim dpv1alpha1.RestoreVolumeClaim, identifier string) (*corev1.Volume, *corev1.VolumeMount, error) { + if err := r.createPVCIfNotExist(reqCtx, cli, claim.ObjectMeta, claim.VolumeClaimSpec); err != nil { + return nil, nil, err + } + return jobBuilder.buildPVCVolumeAndMount(claim, identifier) + } + + // create pvc from volumeClaims, set volume and volumeMount to jobBuilder + for _, claim := range prepareDataConfig.RestoreVolumeClaims { + volume, volumeMount, err := createPVCIfNoteExistsAndBuildVolume(claim, "dp-claim") + if err != nil { + return nil, err + } + jobBuilder.addToCommonVolumesAndMounts(volume, volumeMount) + } + + var ( + restoreJobs []*batchv1.Job + restoreJobReplicas = GetRestoreActionsCountForPrepareData(prepareDataConfig) + claimsTemplate = prepareDataConfig.RestoreVolumeClaimsTemplate + ) + + if prepareDataConfig.IsSerialPolicy() { + // obtain the PVC serial number that needs to be restored + currentOrder := 1 + prepareActions := r.Restore.Status.Actions.PrepareData + for i := range prepareActions { + if prepareActions[i].BackupName != backupSet.Backup.Name && prepareActions[i].Name != actionName { + continue + } + if prepareActions[i].Status == dpv1alpha1.RestoreActionCompleted && currentOrder < restoreJobReplicas { + currentOrder += 1 + if prepareDataConfig.IsSerialPolicy() { + // if the restore policy is Serial, should delete the completed job to release the pvc. 
+ if err := deleteRestoreJob(reqCtx, cli, prepareActions[i].ObjectKey, r.Restore.Namespace); err != nil { + return nil, err + } + } + } + } + restoreJobReplicas = currentOrder + } + + // build restore job to prepare pvc's data + for i := 0; i < restoreJobReplicas; i++ { + // reset specific volumes and volumeMounts + jobBuilder.resetSpecificVolumesAndMounts() + if claimsTemplate != nil { + // create pvc from claims template, build volumes and volumeMounts + for _, claim := range claimsTemplate.Templates { + claim.Name = fmt.Sprintf("%s-%d", claim.Name, i+int(claimsTemplate.StartingIndex)) + volume, volumeMount, err := createPVCIfNoteExistsAndBuildVolume(claim, "dp-claim-tpl") + if err != nil { + return nil, err + } + jobBuilder.addToSpecificVolumesAndMounts(volume, volumeMount) + } + } + // build job and append + job := jobBuilder.build(i) + if prepareDataConfig.IsSerialPolicy() && + restoreJobHasCompleted(r.Restore.Status.Actions.PrepareData, job.Name) { + // if the job has completed and the restore policy is Serial, continue + continue + } + restoreJobs = append(restoreJobs, job) + } + return restoreJobs, nil +} + +// BuildPostReadyActionJobs builds the post ready jobs. 
+func (r *RestoreManager) BuildPostReadyActionJobs(reqCtx intctrlutil.RequestCtx, cli client.Client, backupSet BackupActionSet, actionSpec dpv1alpha1.ActionSpec) ([]*batchv1.Job, error) { + readyConfig := r.Restore.Spec.ReadyConfig + if readyConfig == nil { + return nil, nil + } + if !backupSet.ActionSet.HasPostReadyStage() { + return nil, nil + } + getTargetPodList := func(labelSelector metav1.LabelSelector, msgKey string) ([]corev1.Pod, error) { + targetPodList, err := utils.GetPodListByLabelSelector(reqCtx, cli, labelSelector) + if err != nil { + return nil, err + } + if len(targetPodList.Items) == 0 { + return nil, intctrlutil.NewFatalError(fmt.Sprintf("can not found any pod by spec.readyConfig.%s.target.podSelector", msgKey)) + } + return targetPodList.Items, nil + } + + jobBuilder := newRestoreJobBuilder(r.Restore, backupSet, dpv1alpha1.PostReady). + addBackupVolumeAndMount(). + addCommonEnv() + + buildJobsForJobAction := func() ([]*batchv1.Job, error) { + jobAction := r.Restore.Spec.ReadyConfig.JobAction + if jobAction == nil { + return nil, intctrlutil.NewFatalError("spec.readyConfig.jobAction can not be empty") + } + targetPodList, err := getTargetPodList(jobAction.Target.PodSelector, "jobAction") + if err != nil { + return nil, err + } + targetPod := targetPodList[0] + for _, volumeMount := range jobAction.Target.VolumeMounts { + for _, volume := range targetPod.Spec.Volumes { + if volume.Name != volumeMount.Name { + continue + } + jobBuilder.addToSpecificVolumesAndMounts(&volume, &volumeMount) + } + } + if boolptr.IsSetToTrue(actionSpec.Job.RunOnTargetPodNode) { + jobBuilder.setNodeNameToNodeSelector(targetPod.Spec.NodeName) + } + job := jobBuilder.setImage(actionSpec.Job.Image). + setCommand(actionSpec.Job.Command). + setToleration(targetPod.Spec.Tolerations). 
+ addTargetPodAndCredentialEnv(&targetPod, r.Restore.Spec.ReadyConfig.ConnectCredential).build(0) + return []*batchv1.Job{job}, nil + } + + buildJobsForExecAction := func() ([]*batchv1.Job, error) { + execAction := r.Restore.Spec.ReadyConfig.ExecAction + if execAction == nil { + return nil, intctrlutil.NewFatalError("spec.readyConfig.execAction can not be empty") + } + targetPodList, err := getTargetPodList(execAction.Target.PodSelector, "execAction") + if err != nil { + return nil, err + } + var restoreJobs []*batchv1.Job + for i := range targetPodList { + containerName := actionSpec.Exec.Container + if containerName == "" { + containerName = targetPodList[i].Spec.Containers[0].Name + } + command := fmt.Sprintf("kubectl -n %s exec -it pod/%s -c %s -- %s", targetPodList[i].Namespace, targetPodList[i].Name, containerName, actionSpec.Exec.Command) + jobBuilder.setImage(constant.KBToolsImage).setCommand([]string{"sh", "-c", command}). + setToleration(targetPodList[i].Spec.Tolerations). + addTargetPodAndCredentialEnv(&targetPodList[i], r.Restore.Spec.ReadyConfig.ConnectCredential) + restoreJobs = append(restoreJobs, jobBuilder.build(i)) + } + return restoreJobs, nil + } + + if actionSpec.Job != nil { + return buildJobsForJobAction() + } + return buildJobsForExecAction() +} + +func (r *RestoreManager) createPVCIfNotExist( + reqCtx intctrlutil.RequestCtx, + cli client.Client, + claimMetadata metav1.ObjectMeta, + claimSpec corev1.PersistentVolumeClaimSpec) error { + claimMetadata.Namespace = reqCtx.Req.Namespace + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: claimMetadata, + Spec: claimSpec, + } + tmpPVC := &corev1.PersistentVolumeClaim{} + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Name: claimMetadata.Name, Namespace: claimMetadata.Namespace}, tmpPVC); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + msg := fmt.Sprintf("created pvc %s/%s", pvc.Namespace, pvc.Name) + r.Recorder.Event(r.Restore, corev1.EventTypeNormal, 
reasonCreateRestorePVC, msg) + if err = cli.Create(reqCtx.Ctx, pvc); err != nil { + return client.IgnoreAlreadyExists(err) + } + } + return nil +} + +// CreateJobsIfNotExist creates the jobs if not exist. +func (r *RestoreManager) CreateJobsIfNotExist(reqCtx intctrlutil.RequestCtx, + cli client.Client, + objs []*batchv1.Job) ([]*batchv1.Job, error) { + // creates jobs if not exist + var fetchedJobs []*batchv1.Job + for i := range objs { + if objs[i] == nil { + continue + } + fetchedJob := &batchv1.Job{} + if err := cli.Get(reqCtx.Ctx, client.ObjectKeyFromObject(objs[i]), fetchedJob); err != nil { + if !apierrors.IsNotFound(err) { + return nil, err + } + if err = controllerutil.SetControllerReference(r.Restore, objs[i], r.Schema); err != nil { + return nil, err + } + if err = cli.Create(reqCtx.Ctx, objs[i]); err != nil && !apierrors.IsAlreadyExists(err) { + return nil, err + } + msg := fmt.Sprintf("created job %s/%s", objs[i].Namespace, objs[i].Name) + r.Recorder.Event(r.Restore, corev1.EventTypeNormal, reasonCreateRestoreJob, msg) + fetchedJobs = append(fetchedJobs, objs[i]) + } else { + fetchedJobs = append(fetchedJobs, fetchedJob) + } + } + return fetchedJobs, nil +} + +// CheckJobsDone checks if jobs are completed or failed. 
+func (r *RestoreManager) CheckJobsDone( + stage dpv1alpha1.RestoreStage, + actionName string, + backupSet BackupActionSet, + fetchedJobs []*batchv1.Job) (bool, bool) { + var ( + allJobFinished = true + existFailedJob bool + ) + restoreActions := &r.Restore.Status.Actions.PrepareData + if stage == dpv1alpha1.PostReady { + restoreActions = &r.Restore.Status.Actions.PostReady + } + for i := range fetchedJobs { + statusAction := dpv1alpha1.RestoreStatusAction{ + Name: actionName, + ObjectKey: buildJobKeyForActionStatus(fetchedJobs[i].Name), + BackupName: backupSet.Backup.Name, + } + if done, err := checkJobDone(fetchedJobs[i]); err != nil { + existFailedJob = true + statusAction.Status = dpv1alpha1.RestoreActionFailed + statusAction.Message = err.Error() + SetRestoreStatusAction(restoreActions, statusAction) + } else if done { + statusAction.Status = dpv1alpha1.RestoreActionCompleted + SetRestoreStatusAction(restoreActions, statusAction) + } else { + allJobFinished = false + statusAction.Status = dpv1alpha1.RestoreActionProcessing + SetRestoreStatusAction(restoreActions, statusAction) + } + } + return allJobFinished, existFailedJob +} + +// Recalculation whether all actions have been completed. +func (r *RestoreManager) Recalculation(backupName, actionName string, allActionsFinished, existFailedAction *bool) { + prepareDataConfig := r.Restore.Spec.PrepareDataConfig + if !prepareDataConfig.IsSerialPolicy() { + return + } + + if *existFailedAction { + // under the Serial policy, restore will be failed if any action is failed. 
+ *allActionsFinished = true + return + } + var actionCount int + for _, v := range r.Restore.Status.Actions.PrepareData { + if v.Name == actionName && v.BackupName == backupName { + actionCount += 1 + } + } + if actionCount != GetRestoreActionsCountForPrepareData(prepareDataConfig) { + // if the number of actions is not equal to the number of target actions, the recovery has not yet ended + *allActionsFinished = false + } +} diff --git a/internal/dataprotection/restore/types.go b/internal/dataprotection/restore/types.go new file mode 100644 index 00000000000..338fb3c1ab2 --- /dev/null +++ b/internal/dataprotection/restore/types.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package restore + +var volumeSnapshotGroup = "snapshot.storage.k8s.io" + +// Restore condition constants +const ( + // condition types + ConditionTypeRestoreValidationPassed = "ValidationPassed" + ConditionTypeRestorePreparedData = "PrepareData" + ConditionTypeReadinessProbe = "ReadinessProbe" + ConditionTypeRestorePostReady = "PostReady" + + // condition reasons + ReasonRestoreStarting = "RestoreStarting" + ReasonRestoreCompleted = "RestoreCompleted" + ReasonRestoreFailed = "RestoreFailed" + ReasonValidateFailed = "ValidateFailed" + ReasonValidateSuccessfully = "ValidateSuccessfully" + ReasonProcessing = "Processing" + ReasonFailed = "Failed" + ReasonSucceed = "Succeed" + reasonCreateRestoreJob = "CreateRestoreJob" + reasonCreateRestorePVC = "CreateRestorePVC" +) + +// labels key +const ( + DataProtectionLabelRestoreKey = "dataprotection.kubeblocks.io/restore" +) + +// env name for restore + +const ( + DPRestoreTime = "DP_RESTORE_TIME" + DPRestoreTimestamp = "DP_RESTORE_TIMESTAMP" +) + +// Restore constant +const Restore = "restore" + +var defaultBackoffLimit int32 = 3 diff --git a/internal/dataprotection/restore/utils.go b/internal/dataprotection/restore/utils.go new file mode 100644 index 00000000000..d684a0a2419 --- /dev/null +++ b/internal/dataprotection/restore/utils.go @@ -0,0 +1,218 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package restore + +import ( + "fmt" + "strings" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" +) + +func SetRestoreCondition(restore *dpv1alpha1.Restore, status metav1.ConditionStatus, conditionType, reason, message string) { + condition := metav1.Condition{ + Type: conditionType, + Reason: reason, + Message: message, + Status: status, + } + meta.SetStatusCondition(&restore.Status.Conditions, condition) +} + +// SetRestoreValidationCondition sets restore condition which type is ConditionTypeRestoreValidationPassed. +func SetRestoreValidationCondition(restore *dpv1alpha1.Restore, reason, message string) { + status := metav1.ConditionFalse + if reason == ReasonValidateSuccessfully { + status = metav1.ConditionTrue + } + SetRestoreCondition(restore, status, ConditionTypeRestoreValidationPassed, reason, message) +} + +// SetRestoreStageCondition sets restore stage condition. 
+func SetRestoreStageCondition(restore *dpv1alpha1.Restore, stage dpv1alpha1.RestoreStage, reason, message string) { + status := metav1.ConditionFalse + if reason == ReasonSucceed { + status = metav1.ConditionTrue + } + conditionType := ConditionTypeRestorePreparedData + if stage == dpv1alpha1.PostReady { + conditionType = ConditionTypeRestorePostReady + } + SetRestoreCondition(restore, status, conditionType, reason, message) +} + +func FindRestoreStatusAction(actions []dpv1alpha1.RestoreStatusAction, key string) *dpv1alpha1.RestoreStatusAction { + for i := range actions { + if actions[i].ObjectKey == key { + return &actions[i] + } + } + return nil +} + +func SetRestoreStatusAction(actions *[]dpv1alpha1.RestoreStatusAction, + statusAction dpv1alpha1.RestoreStatusAction) { + if actions == nil { + return + } + if statusAction.Message == "" { + switch statusAction.Status { + case dpv1alpha1.RestoreActionProcessing: + statusAction.Message = fmt.Sprintf(`"%s" is processing`, statusAction.ObjectKey) + case dpv1alpha1.RestoreActionCompleted: + statusAction.Message = fmt.Sprintf(`successfully processed the "%s"`, statusAction.ObjectKey) + case dpv1alpha1.RestoreActionFailed: + statusAction.Message = fmt.Sprintf(`"%s" is failed, you can describe it or logs the ownered pod to get more informations`, statusAction.ObjectKey) + } + } + if statusAction.Status != dpv1alpha1.RestoreActionProcessing { + statusAction.EndTime = metav1.Now() + } + existingAction := FindRestoreStatusAction(*actions, statusAction.ObjectKey) + if existingAction == nil { + statusAction.StartTime = metav1.Now() + *actions = append(*actions, statusAction) + return + } + if existingAction.Status != statusAction.Status { + existingAction.Status = statusAction.Status + existingAction.EndTime = statusAction.EndTime + existingAction.Message = statusAction.Message + } +} + +func GetRestoreActionsCountForPrepareData(config *dpv1alpha1.PrepareDataConfig) int { + if config == nil { + return 0 + } + count := 1 + if 
config.RestoreVolumeClaimsTemplate != nil { + count = int(config.RestoreVolumeClaimsTemplate.Replicas) + } + return count +} + +func BuildRestoreLabels(restoreName string) map[string]string { + return map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + DataProtectionLabelRestoreKey: restoreName, + } +} + +func GetRestoreDuration(status dpv1alpha1.RestoreStatus) *metav1.Duration { + if status.CompletionTimestamp == nil || status.StartTimestamp == nil { + return nil + } + return &metav1.Duration{Duration: status.CompletionTimestamp.Sub(status.StartTimestamp.Time).Round(time.Second)} +} + +func getTimeFormat(envs []corev1.EnvVar) string { + for _, env := range envs { + if env.Name == dptypes.DPTimeFormat { + return env.Value + } + } + return time.RFC3339 +} + +// checkJobDone if the job is completed or failed, return true. +// if the job is failed, return an error to describe the failed message. +func checkJobDone(job *batchv1.Job) (bool, error) { + if job == nil { + return false, nil + } + for _, condition := range job.Status.Conditions { + if condition.Type == batchv1.JobComplete { + return true, nil + } else if condition.Type == batchv1.JobFailed { + return true, fmt.Errorf(condition.Reason + ": " + condition.Message) + } + } + return false, nil +} + +func compareWithBackupStopTime(backupI, backupJ dpv1alpha1.Backup) bool { + endTimeI := backupI.GetEndTime() + endTimeJ := backupJ.GetEndTime() + if endTimeI.IsZero() { + return false + } + if endTimeJ.IsZero() { + return true + } + if endTimeI.Equal(endTimeJ) { + return backupI.Name < backupJ.Name + } + return endTimeI.Before(endTimeJ) +} + +func buildJobKeyForActionStatus(jobName string) string { + return fmt.Sprintf("%s/%s", constant.JobKind, jobName) +} + +func getMountPathWithSourceVolume(backup *dpv1alpha1.Backup, volumeSource string) string { + backupMethod := backup.Status.BackupMethod + if backupMethod != nil && backupMethod.TargetVolumes != nil { + for _, v := range 
backupMethod.TargetVolumes.VolumeMounts { + if v.Name == volumeSource { + return v.MountPath + } + } + } + return "" +} + +func restoreJobHasCompleted(statusActions []dpv1alpha1.RestoreStatusAction, jobName string) bool { + jobKey := buildJobKeyForActionStatus(jobName) + for i := range statusActions { + if statusActions[i].ObjectKey == jobKey && statusActions[i].Status == dpv1alpha1.RestoreActionCompleted { + return true + } + } + return false +} + +func deleteRestoreJob(reqCtx intctrlutil.RequestCtx, cli client.Client, jobKey string, namespace string) error { + jobName := strings.ReplaceAll(jobKey, fmt.Sprintf("%s/", constant.JobKind), "") + job := &batchv1.Job{} + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Name: jobName, Namespace: namespace}, job); err != nil { + return client.IgnoreNotFound(err) + } + if controllerutil.ContainsFinalizer(job, dptypes.DataProtectionFinalizerName) { + patch := client.MergeFrom(job.DeepCopy()) + controllerutil.RemoveFinalizer(job, dptypes.DataProtectionFinalizerName) + if err := cli.Patch(reqCtx.Ctx, job, patch); err != nil { + return err + } + } + return intctrlutil.BackgroundDeleteObject(cli, reqCtx.Ctx, job) +} diff --git a/internal/dataprotection/types/constant.go b/internal/dataprotection/types/constant.go new file mode 100644 index 00000000000..3254e3feb61 --- /dev/null +++ b/internal/dataprotection/types/constant.go @@ -0,0 +1,99 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package types + +const ( + // DataProtectionFinalizerName is the name of our custom finalizer + DataProtectionFinalizerName = "dataprotection.kubeblocks.io/finalizer" +) + +// annotation keys +const ( + // DefaultBackupPolicyAnnotationKey specifies the default backup policy. + DefaultBackupPolicyAnnotationKey = "dataprotection.kubeblocks.io/is-default-policy" + // DefaultBackupPolicyTemplateAnnotationKey specifies the default backup policy template. + DefaultBackupPolicyTemplateAnnotationKey = "dataprotection.kubeblocks.io/is-default-policy-template" + // DefaultBackupRepoAnnotationKey specifies the default backup repo. + DefaultBackupRepoAnnotationKey = "dataprotection.kubeblocks.io/is-default-repo" + // BackupDataPathPrefixAnnotationKey specifies the backup data path prefix. + BackupDataPathPrefixAnnotationKey = "dataprotection.kubeblocks.io/path-prefix" + // ReconfigureRefAnnotationKey specifies the reconfigure ref. + ReconfigureRefAnnotationKey = "dataprotection.kubeblocks.io/reconfigure-ref" +) + +// label keys +const ( + // DataProtectionLabelClusterUIDKey specifies the cluster UID label key. + DataProtectionLabelClusterUIDKey = "dataprotection.kubeblocks.io/cluster-uid" + // BackupTypeLabelKeyKey specifies the backup type label key. + BackupTypeLabelKeyKey = "dataprotection.kubeblocks.io/backup-type" + // DataProtectionLabelBackupNameKey specifies the backup name label key. + DataProtectionLabelBackupNameKey = "dataprotection.kubeblocks.io/backup-name" + // DataProtectionLabelBackupScheduleKey specifies the backup schedule label key. + DataProtectionLabelBackupScheduleKey = "dataprotection.kubeblocks.io/backup-schedule" + // DataProtectionLabelBackupPolicyKey specifies the backup policy label key. 
+ DataProtectionLabelBackupPolicyKey = "dataprotection.kubeblocks.io/backup-policy" + // DataProtectionLabelBackupMethodKey specifies the backup method label key. + DataProtectionLabelBackupMethodKey = "dataprotection.kubeblocks.io/backup-method" + // DataProtectionLabelBackupTypeKey specifies the backup type label key. + DataProtectionLabelBackupTypeKey = "dataprotection.kubeblocks.io/backup-type" + // DataProtectionLabelAutoBackupKey specifies the auto backup label key. + DataProtectionLabelAutoBackupKey = "dataprotection.kubeblocks.io/autobackup" +) + +// env names +const ( + // DPDBHost database host for dataProtection + DPDBHost = "DP_DB_HOST" + // DPDBUser database user for dataProtection + DPDBUser = "DP_DB_USER" + // DPDBPassword database password for dataProtection + DPDBPassword = "DP_DB_PASSWORD" + // DPDBEndpoint database endpoint for dataProtection + DPDBEndpoint = "DP_DB_ENDPOINT" + // DPDBPort database port for dataProtection + DPDBPort = "DP_DB_PORT" + // DPTargetPodName the target pod name + DPTargetPodName = "DP_TARGET_POD_NAME" + // DPBackupDIR the dest directory for backup data + DPBackupDIR = "DP_BACKUP_DIR" + // DPBackupName backup CR name + DPBackupName = "DP_BACKUP_NAME" + // DPTTL backup time to live, reference the backupPolicy.spec.retention.ttl + DPTTL = "DP_TTL" + // DPCheckInterval check interval for continue backup + DPCheckInterval = "DP_CHECK_INTERVAL" + // DPBackupInfoFile the file name which retains the backup.status info + DPBackupInfoFile = "DP_BACKUP_INFO_FILE" + // DPTimeFormat golang time format string + DPTimeFormat = "TIME_FORMAT" + // DPVolumeDataDIR the volume data dir + DPVolumeDataDIR = "VOLUME_DATA_DIR" // + // DPKBRecoveryTime recovery time + DPKBRecoveryTime = "KB_RECOVERY_TIME" // recovery time + // DPKBRecoveryTimestamp recovery timestamp + DPKBRecoveryTimestamp = "KB_RECOVERY_TIMESTAMP" // recovery timestamp + // DPBaseBackupStartTime base backup start time for pitr + DPBaseBackupStartTime = 
"BASE_BACKUP_START_TIME" // base backup start time for pitr + // DPBaseBackupStartTimestamp base backup start timestamp for pitr + DPBaseBackupStartTimestamp = "BASE_BACKUP_START_TIMESTAMP" // base backup start timestamp for pitr + // DPBackupStopTime backup stop time + DPBackupStopTime = "BACKUP_STOP_TIME" // backup stop time +) diff --git a/internal/dataprotection/types/types.go b/internal/dataprotection/types/types.go new file mode 100644 index 00000000000..a12b87b5eec --- /dev/null +++ b/internal/dataprotection/types/types.go @@ -0,0 +1,25 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package types + +var ( + // DefaultBackOffLimit is the default backoff limit for jobs. + DefaultBackOffLimit = int32(3) +) diff --git a/internal/dataprotection/utils/boolptr/boolptr.go b/internal/dataprotection/utils/boolptr/boolptr.go new file mode 100644 index 00000000000..5ab816092ba --- /dev/null +++ b/internal/dataprotection/utils/boolptr/boolptr.go @@ -0,0 +1,42 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package boolptr + +// IsSetToTrue returns true if and only if the bool pointer is non-nil and set to true. +func IsSetToTrue(b *bool) bool { + return b != nil && *b +} + +// IsSetToFalse returns true if and only if the bool pointer is non-nil and set to false. +func IsSetToFalse(b *bool) bool { + return b != nil && !*b +} + +// True returns a *bool whose underlying value is true. +func True() *bool { + t := true + return &t +} + +// False returns a *bool whose underlying value is false. +func False() *bool { + t := false + return &t +} diff --git a/internal/dataprotection/utils/boolptr/boolptr_test.go b/internal/dataprotection/utils/boolptr/boolptr_test.go new file mode 100644 index 00000000000..ff72c3c9733 --- /dev/null +++ b/internal/dataprotection/utils/boolptr/boolptr_test.go @@ -0,0 +1,39 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package boolptr + +import ( + "testing" +) + +func TestBoolPtr(t *testing.T) { + if !IsSetToTrue(True()) { + t.Errorf("True() should return a pointer to true") + } + if IsSetToTrue(False()) { + t.Errorf("False() should return a pointer to false") + } + if IsSetToFalse(True()) { + t.Errorf("True() should return a pointer to true") + } + if !IsSetToFalse(False()) { + t.Errorf("False() should return a pointer to false") + } +} diff --git a/internal/dataprotection/utils/envvar.go b/internal/dataprotection/utils/envvar.go new file mode 100644 index 00000000000..8b1a529f2c4 --- /dev/null +++ b/internal/dataprotection/utils/envvar.go @@ -0,0 +1,63 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package utils + +import ( + corev1 "k8s.io/api/core/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" +) + +func BuildEnvByCredential(pod *corev1.Pod, credential *dpv1alpha1.ConnectionCredential) []corev1.EnvVar { + var envVars []corev1.EnvVar + if credential == nil { + return nil + } + var hostEnv corev1.EnvVar + if credential.HostKey == "" { + hostEnv = corev1.EnvVar{Name: dptypes.DPDBHost, + Value: intctrlutil.BuildPodHostDNS(pod)} + } else { + hostEnv = buildEnvBySecretKey(dptypes.DPDBHost, credential.SecretName, credential.HostKey) + } + envVars = append(envVars, + buildEnvBySecretKey(dptypes.DPDBUser, credential.SecretName, credential.UsernameKey), + buildEnvBySecretKey(dptypes.DPDBPassword, credential.SecretName, credential.PasswordKey), + buildEnvBySecretKey(dptypes.DPDBPort, credential.SecretName, credential.PortKey), + hostEnv, + ) + return envVars +} + +func buildEnvBySecretKey(name, secretName, key string) corev1.EnvVar { + return corev1.EnvVar{ + Name: name, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretName, + }, + Key: key, + }, + }, + } +} diff --git a/internal/dataprotection/utils/utils.go b/internal/dataprotection/utils/utils.go new file mode 100644 index 00000000000..0da5fb69bbb --- /dev/null +++ b/internal/dataprotection/utils/utils.go @@ -0,0 +1,135 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package utils + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +func AddTolerations(podSpec *corev1.PodSpec) (err error) { + if cmTolerations := viper.GetString(constant.CfgKeyCtrlrMgrTolerations); cmTolerations != "" { + if err = json.Unmarshal([]byte(cmTolerations), &podSpec.Tolerations); err != nil { + return err + } + } + if cmAffinity := viper.GetString(constant.CfgKeyCtrlrMgrAffinity); cmAffinity != "" { + if err = json.Unmarshal([]byte(cmAffinity), &podSpec.Affinity); err != nil { + return err + } + } + if cmNodeSelector := viper.GetString(constant.CfgKeyCtrlrMgrNodeSelector); cmNodeSelector != "" { + if err = json.Unmarshal([]byte(cmNodeSelector), &podSpec.NodeSelector); err != nil { + return err + } + } + return nil +} + +func IsJobFinished(job *batchv1.Job) (bool, batchv1.JobConditionType, string) { + for _, c := range job.Status.Conditions { + if c.Type == batchv1.JobComplete && c.Status == corev1.ConditionTrue { + return true, c.Type, "" + } + if c.Type == batchv1.JobFailed && c.Status == corev1.ConditionTrue { + return 
true, c.Type, c.Reason + "/" + c.Message + } + } + return false, "", "" +} + +func RemoveDataProtectionFinalizer(ctx context.Context, cli client.Client, obj client.Object) error { + if !controllerutil.ContainsFinalizer(obj, dptypes.DataProtectionFinalizerName) { + return nil + } + patch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + controllerutil.RemoveFinalizer(obj, dptypes.DataProtectionFinalizerName) + return cli.Patch(ctx, obj, patch) +} + +// GetActionSetByName gets the ActionSet by name. +func GetActionSetByName(reqCtx intctrlutil.RequestCtx, + cli client.Client, name string) (*dpv1alpha1.ActionSet, error) { + if name == "" { + return nil, nil + } + as := &dpv1alpha1.ActionSet{} + if err := cli.Get(reqCtx.Ctx, client.ObjectKey{Name: name}, as); err != nil { + reqCtx.Log.Error(err, "failed to get ActionSet for backup.", "ActionSet", name) + return nil, err + } + return as, nil +} + +func GetPodListByLabelSelector(reqCtx intctrlutil.RequestCtx, + cli client.Client, + labelSelector metav1.LabelSelector) (*corev1.PodList, error) { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return nil, err + } + targetPodList := &corev1.PodList{} + if err = cli.List(reqCtx.Ctx, targetPodList, + client.InNamespace(reqCtx.Req.Namespace), + client.MatchingLabelsSelector{Selector: selector}); err != nil { + return nil, err + } + return targetPodList, nil +} + +func GetBackupVolumeSnapshotName(backupName, volumeSource string) string { + return fmt.Sprintf("%s-%s", backupName, volumeSource) +} + +// MergeEnv merges the targetEnv to original env. if original env exist the same name var, it will be replaced. 
+func MergeEnv(originalEnv, targetEnv []corev1.EnvVar) []corev1.EnvVar { + if len(targetEnv) == 0 { + return originalEnv + } + originalEnvIndexMap := map[string]int{} + for i := range originalEnv { + originalEnvIndexMap[originalEnv[i].Name] = i + } + for i := range targetEnv { + if index, ok := originalEnvIndexMap[targetEnv[i].Name]; ok { + originalEnv[index] = targetEnv[i] + } else { + originalEnv = append(originalEnv, targetEnv[i]) + } + } + return originalEnv +} + +func VolumeSnapshotEnabled() bool { + return viper.GetBool("VOLUMESNAPSHOT") +} diff --git a/internal/generics/type.go b/internal/generics/type.go index 94f8536cbba..2adc22def64 100644 --- a/internal/generics/type.go +++ b/internal/generics/type.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" @@ -104,16 +104,19 @@ var ConfigConstraintSignature = func(_ appsv1alpha1.ConfigConstraint, _ *appsv1a var BackupPolicyTemplateSignature = func(_ appsv1alpha1.BackupPolicyTemplate, _ *appsv1alpha1.BackupPolicyTemplate, _ appsv1alpha1.BackupPolicyTemplateList, _ *appsv1alpha1.BackupPolicyTemplateList) { } -var BackupPolicySignature = func(_ dataprotectionv1alpha1.BackupPolicy, _ *dataprotectionv1alpha1.BackupPolicy, _ dataprotectionv1alpha1.BackupPolicyList, _ *dataprotectionv1alpha1.BackupPolicyList) { +var BackupPolicySignature = func(_ dpv1alpha1.BackupPolicy, _ *dpv1alpha1.BackupPolicy, _ dpv1alpha1.BackupPolicyList, _ *dpv1alpha1.BackupPolicyList) { } -var BackupSignature = func(_ dataprotectionv1alpha1.Backup, _ *dataprotectionv1alpha1.Backup, _ 
dataprotectionv1alpha1.BackupList, _ *dataprotectionv1alpha1.BackupList) { +var BackupSignature = func(_ dpv1alpha1.Backup, _ *dpv1alpha1.Backup, _ dpv1alpha1.BackupList, _ *dpv1alpha1.BackupList) { } -var BackupToolSignature = func(_ dataprotectionv1alpha1.BackupTool, _ *dataprotectionv1alpha1.BackupTool, _ dataprotectionv1alpha1.BackupToolList, _ *dataprotectionv1alpha1.BackupToolList) { +var BackupScheduleSignature = func(_ dpv1alpha1.BackupSchedule, _ *dpv1alpha1.BackupSchedule, _ dpv1alpha1.BackupScheduleList, _ *dpv1alpha1.BackupScheduleList) { } -var RestoreJobSignature = func(_ dataprotectionv1alpha1.RestoreJob, _ *dataprotectionv1alpha1.RestoreJob, _ dataprotectionv1alpha1.RestoreJobList, _ *dataprotectionv1alpha1.RestoreJobList) { +var RestoreSignature = func(_ dpv1alpha1.Restore, _ *dpv1alpha1.Restore, _ dpv1alpha1.RestoreList, _ *dpv1alpha1.RestoreList) { } -var BackupRepoSignature = func(_ dataprotectionv1alpha1.BackupRepo, _ *dataprotectionv1alpha1.BackupRepo, _ dataprotectionv1alpha1.BackupRepoList, _ *dataprotectionv1alpha1.BackupRepoList) { +var ActionSetSignature = func(_ dpv1alpha1.ActionSet, _ *dpv1alpha1.ActionSet, _ dpv1alpha1.ActionSetList, _ *dpv1alpha1.ActionSetList) { } +var BackupRepoSignature = func(_ dpv1alpha1.BackupRepo, _ *dpv1alpha1.BackupRepo, _ dpv1alpha1.BackupRepoList, _ *dpv1alpha1.BackupRepoList) { +} + var AddonSignature = func(_ extensionsv1alpha1.Addon, _ *extensionsv1alpha1.Addon, _ extensionsv1alpha1.AddonList, _ *extensionsv1alpha1.AddonList) { } var ComponentResourceConstraintSignature = func(_ appsv1alpha1.ComponentResourceConstraint, _ *appsv1alpha1.ComponentResourceConstraint, _ appsv1alpha1.ComponentResourceConstraintList, _ *appsv1alpha1.ComponentResourceConstraintList) { diff --git a/internal/testutil/apps/backup_factory.go b/internal/testutil/apps/backup_factory.go deleted file mode 100644 index 5695d1d00d9..00000000000 --- a/internal/testutil/apps/backup_factory.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 
(C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package apps - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" -) - -type MockBackupFactory struct { - BaseFactory[dataprotectionv1alpha1.Backup, *dataprotectionv1alpha1.Backup, MockBackupFactory] -} - -func NewBackupFactory(namespace, name string) *MockBackupFactory { - f := &MockBackupFactory{} - f.init(namespace, name, - &dataprotectionv1alpha1.Backup{ - Spec: dataprotectionv1alpha1.BackupSpec{}, - }, f) - return f -} - -func (factory *MockBackupFactory) SetBackupPolicyName(backupPolicyName string) *MockBackupFactory { - factory.get().Spec.BackupPolicyName = backupPolicyName - return factory -} - -func (factory *MockBackupFactory) SetBackupType(backupType dataprotectionv1alpha1.BackupType) *MockBackupFactory { - factory.get().Spec.BackupType = backupType - return factory -} - -func (factory *MockBackupFactory) SetLabels(labels map[string]string) *MockBackupFactory { - factory.get().SetLabels(labels) - return factory -} - -func (factory *MockBackupFactory) SetBackLog(startTime, stopTime time.Time) *MockBackupFactory { - manifests := factory.get().Status.Manifests - if manifests == nil { - manifests = &dataprotectionv1alpha1.ManifestsStatus{} - } - if manifests.BackupLog == 
nil { - manifests.BackupLog = &dataprotectionv1alpha1.BackupLogStatus{} - } - manifests.BackupLog.StartTime = &metav1.Time{Time: startTime} - manifests.BackupLog.StopTime = &metav1.Time{Time: stopTime} - factory.get().Status.Manifests = manifests - return factory -} diff --git a/internal/testutil/apps/backuppolicy_factory.go b/internal/testutil/apps/backuppolicy_factory.go deleted file mode 100644 index 2b010383e6b..00000000000 --- a/internal/testutil/apps/backuppolicy_factory.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package apps - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" -) - -type MockBackupPolicyFactory struct { - BaseFactory[dataprotectionv1alpha1.BackupPolicy, *dataprotectionv1alpha1.BackupPolicy, MockBackupPolicyFactory] - backupType dataprotectionv1alpha1.BackupType -} - -func NewBackupPolicyFactory(namespace, name string) *MockBackupPolicyFactory { - f := &MockBackupPolicyFactory{} - f.init(namespace, name, - &dataprotectionv1alpha1.BackupPolicy{}, f) - return f -} - -func (factory *MockBackupPolicyFactory) setBasePolicyField(setField func(basePolicy *dataprotectionv1alpha1.BasePolicy)) { - var basePolicy *dataprotectionv1alpha1.BasePolicy - switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeDataFile: - basePolicy = &factory.get().Spec.Datafile.BasePolicy - case dataprotectionv1alpha1.BackupTypeLogFile: - basePolicy = &factory.get().Spec.Logfile.BasePolicy - case dataprotectionv1alpha1.BackupTypeSnapshot: - basePolicy = &factory.get().Spec.Snapshot.BasePolicy - } - if basePolicy == nil { - // ignore - return - } - setField(basePolicy) -} - -func (factory *MockBackupPolicyFactory) setCommonPolicyField(setField func(commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy)) { - var commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy - switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeDataFile: - commonPolicy = factory.get().Spec.Datafile - case dataprotectionv1alpha1.BackupTypeLogFile: - commonPolicy = factory.get().Spec.Logfile - } - if commonPolicy == nil { - // ignore - return - } - setField(commonPolicy) -} - -func (factory *MockBackupPolicyFactory) setScheduleField(setField func(schedulePolicy *dataprotectionv1alpha1.SchedulePolicy)) { - var schedulePolicy *dataprotectionv1alpha1.SchedulePolicy - 
switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeDataFile: - factory.get().Spec.Schedule.Datafile = &dataprotectionv1alpha1.SchedulePolicy{} - schedulePolicy = factory.get().Spec.Schedule.Datafile - case dataprotectionv1alpha1.BackupTypeSnapshot: - factory.get().Spec.Schedule.Snapshot = &dataprotectionv1alpha1.SchedulePolicy{} - schedulePolicy = factory.get().Spec.Schedule.Snapshot - case dataprotectionv1alpha1.BackupTypeLogFile: - factory.get().Spec.Schedule.Logfile = &dataprotectionv1alpha1.SchedulePolicy{} - schedulePolicy = factory.get().Spec.Schedule.Logfile - } - if schedulePolicy == nil { - // ignore - return - } - setField(schedulePolicy) -} - -func (factory *MockBackupPolicyFactory) AddSnapshotPolicy() *MockBackupPolicyFactory { - factory.get().Spec.Snapshot = &dataprotectionv1alpha1.SnapshotPolicy{ - Hooks: &dataprotectionv1alpha1.BackupPolicyHook{}, - } - factory.backupType = dataprotectionv1alpha1.BackupTypeSnapshot - return factory -} - -func (factory *MockBackupPolicyFactory) AddDataFilePolicy() *MockBackupPolicyFactory { - factory.get().Spec.Datafile = &dataprotectionv1alpha1.CommonBackupPolicy{ - PersistentVolumeClaim: dataprotectionv1alpha1.PersistentVolumeClaim{ - Name: pointer.String("backup-data"), - CreatePolicy: dataprotectionv1alpha1.CreatePVCPolicyIfNotPresent, - }, - } - factory.backupType = dataprotectionv1alpha1.BackupTypeDataFile - return factory -} - -func (factory *MockBackupPolicyFactory) AddLogfilePolicy() *MockBackupPolicyFactory { - factory.get().Spec.Logfile = &dataprotectionv1alpha1.CommonBackupPolicy{ - PersistentVolumeClaim: dataprotectionv1alpha1.PersistentVolumeClaim{ - Name: pointer.String("backup-data"), - CreatePolicy: dataprotectionv1alpha1.CreatePVCPolicyIfNotPresent, - }, - } - factory.backupType = dataprotectionv1alpha1.BackupTypeLogFile - return factory -} - -func (factory *MockBackupPolicyFactory) SetBackupToolName(backupToolName string) *MockBackupPolicyFactory { - 
factory.setCommonPolicyField(func(commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) { - commonPolicy.BackupToolName = backupToolName - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetSchedule(schedule string, enable bool) *MockBackupPolicyFactory { - factory.setScheduleField(func(schedulePolicy *dataprotectionv1alpha1.SchedulePolicy) { - schedulePolicy.Enable = enable - schedulePolicy.CronExpression = schedule - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetScheduleStartingDeadlineMinutes(startingDeadlineMinutes *int64) *MockBackupPolicyFactory { - factory.get().Spec.Schedule.StartingDeadlineMinutes = startingDeadlineMinutes - return factory -} - -func (factory *MockBackupPolicyFactory) SetTTL(duration string) *MockBackupPolicyFactory { - factory.get().Spec.Retention = &dataprotectionv1alpha1.RetentionSpec{ - TTL: &duration, - } - return factory -} - -func (factory *MockBackupPolicyFactory) SetBackupsHistoryLimit(backupsHistoryLimit int32) *MockBackupPolicyFactory { - factory.setBasePolicyField(func(basePolicy *dataprotectionv1alpha1.BasePolicy) { - basePolicy.BackupsHistoryLimit = backupsHistoryLimit - }) - return factory -} - -func (factory *MockBackupPolicyFactory) AddMatchLabels(keyAndValues ...string) *MockBackupPolicyFactory { - matchLabels := make(map[string]string) - for k, v := range WithMap(keyAndValues...) 
{ - matchLabels[k] = v - } - factory.setBasePolicyField(func(basePolicy *dataprotectionv1alpha1.BasePolicy) { - basePolicy.Target.LabelsSelector = &metav1.LabelSelector{ - MatchLabels: matchLabels, - } - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetTargetSecretName(name string) *MockBackupPolicyFactory { - factory.setBasePolicyField(func(basePolicy *dataprotectionv1alpha1.BasePolicy) { - basePolicy.Target.Secret = &dataprotectionv1alpha1.BackupPolicySecret{Name: name} - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetHookContainerName(containerName string) *MockBackupPolicyFactory { - snapshotPolicy := factory.get().Spec.Snapshot - if snapshotPolicy == nil { - return factory - } - snapshotPolicy.Hooks.ContainerName = containerName - return factory -} - -func (factory *MockBackupPolicyFactory) AddHookPreCommand(preCommand string) *MockBackupPolicyFactory { - snapshotPolicy := factory.get().Spec.Snapshot - if snapshotPolicy == nil { - return factory - } - preCommands := &snapshotPolicy.Hooks.PreCommands - *preCommands = append(*preCommands, preCommand) - return factory -} - -func (factory *MockBackupPolicyFactory) AddHookPostCommand(postCommand string) *MockBackupPolicyFactory { - snapshotPolicy := factory.get().Spec.Snapshot - if snapshotPolicy == nil { - return factory - } - postCommands := &snapshotPolicy.Hooks.PostCommands - *postCommands = append(*postCommands, postCommand) - return factory -} - -func (factory *MockBackupPolicyFactory) SetPVC(pvcName string) *MockBackupPolicyFactory { - factory.setCommonPolicyField(func(commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) { - if pvcName == "" { - commonPolicy.PersistentVolumeClaim.Name = nil - } else { - commonPolicy.PersistentVolumeClaim.Name = &pvcName - } - commonPolicy.PersistentVolumeClaim.InitCapacity = resource.MustParse(constant.DefaultBackupPvcInitCapacity) - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetBackupRepo(repoName string) 
*MockBackupPolicyFactory { - factory.setCommonPolicyField(func(commonPolicy *dataprotectionv1alpha1.CommonBackupPolicy) { - if repoName == "" { - commonPolicy.BackupRepoName = nil - } else { - commonPolicy.BackupRepoName = &repoName - } - }) - return factory -} - -func (factory *MockBackupPolicyFactory) SetBackupStatusUpdates(backupStatusUpdates []dataprotectionv1alpha1.BackupStatusUpdate) *MockBackupPolicyFactory { - factory.setBasePolicyField(func(basePolicy *dataprotectionv1alpha1.BasePolicy) { - basePolicy.BackupStatusUpdates = backupStatusUpdates - }) - return factory -} diff --git a/internal/testutil/apps/backuppolicytemplate_factory.go b/internal/testutil/apps/backuppolicytemplate_factory.go index c0aef4858e0..b7a5026b5c5 100644 --- a/internal/testutil/apps/backuppolicytemplate_factory.go +++ b/internal/testutil/apps/backuppolicytemplate_factory.go @@ -20,195 +20,120 @@ along with this program. If not, see . package apps import ( + corev1 "k8s.io/api/core/v1" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" ) type MockBackupPolicyTemplateFactory struct { BaseFactory[appsv1alpha1.BackupPolicyTemplate, *appsv1alpha1.BackupPolicyTemplate, MockBackupPolicyTemplateFactory] - backupType dataprotectionv1alpha1.BackupType } func NewBackupPolicyTemplateFactory(name string) *MockBackupPolicyTemplateFactory { f := &MockBackupPolicyTemplateFactory{} - f.init("", name, + f.Init("", name, &appsv1alpha1.BackupPolicyTemplate{}, f) return f } -func (factory *MockBackupPolicyTemplateFactory) SetClusterDefRef(clusterDefRef string) *MockBackupPolicyTemplateFactory { - factory.get().Spec.ClusterDefRef = clusterDefRef - return factory +func (f *MockBackupPolicyTemplateFactory) SetClusterDefRef(clusterDefRef string) *MockBackupPolicyTemplateFactory { + f.Get().Spec.ClusterDefRef = clusterDefRef + return 
f } -func (factory *MockBackupPolicyTemplateFactory) getLastBackupPolicy() *appsv1alpha1.BackupPolicy { - l := len(factory.get().Spec.BackupPolicies) +func (f *MockBackupPolicyTemplateFactory) getLastBackupPolicy() *appsv1alpha1.BackupPolicy { + l := len(f.Get().Spec.BackupPolicies) if l == 0 { return nil } - backupPolicies := factory.get().Spec.BackupPolicies + backupPolicies := f.Get().Spec.BackupPolicies return &backupPolicies[l-1] } -func (factory *MockBackupPolicyTemplateFactory) AddBackupPolicy(componentDef string) *MockBackupPolicyTemplateFactory { - factory.get().Spec.BackupPolicies = append(factory.get().Spec.BackupPolicies, appsv1alpha1.BackupPolicy{ - ComponentDefRef: componentDef, - }) - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) SetTTL(duration string) *MockBackupPolicyTemplateFactory { - factory.getLastBackupPolicy().Retention = &appsv1alpha1.RetentionSpec{ - TTL: &duration, +func (f *MockBackupPolicyTemplateFactory) getLastBackupMethod() *dpv1alpha1.BackupMethod { + backupPolicy := f.getLastBackupPolicy() + l := len(backupPolicy.BackupMethods) + if l == 0 { + return nil } - return factory + backupMethods := backupPolicy.BackupMethods + return &backupMethods[l-1] } -func (factory *MockBackupPolicyTemplateFactory) setBasePolicyField(setField func(basePolicy *appsv1alpha1.BasePolicy)) { - backupPolicy := factory.getLastBackupPolicy() - var basePolicy *appsv1alpha1.BasePolicy - switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeDataFile: - basePolicy = &backupPolicy.Datafile.BasePolicy - case dataprotectionv1alpha1.BackupTypeLogFile: - basePolicy = &backupPolicy.Logfile.BasePolicy - case dataprotectionv1alpha1.BackupTypeSnapshot: - basePolicy = &backupPolicy.Snapshot.BasePolicy - } - if basePolicy == nil { - // ignore - return - } - setField(basePolicy) +func (f *MockBackupPolicyTemplateFactory) AddBackupPolicy(componentDef string) *MockBackupPolicyTemplateFactory { + f.Get().Spec.BackupPolicies = 
append(f.Get().Spec.BackupPolicies, appsv1alpha1.BackupPolicy{ + ComponentDefRef: componentDef, + }) + return f } -func (factory *MockBackupPolicyTemplateFactory) setCommonPolicyField(setField func(commonPolicy *appsv1alpha1.CommonBackupPolicy)) { - backupPolicy := factory.getLastBackupPolicy() - var commonPolicy *appsv1alpha1.CommonBackupPolicy - switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeDataFile: - commonPolicy = backupPolicy.Datafile - case dataprotectionv1alpha1.BackupTypeLogFile: - commonPolicy = backupPolicy.Logfile - } - if commonPolicy == nil { - // ignore - return - } - setField(commonPolicy) +func (f *MockBackupPolicyTemplateFactory) SetRetentionPeriod(duration string) *MockBackupPolicyTemplateFactory { + f.getLastBackupPolicy().RetentionPeriod = dpv1alpha1.RetentionPeriod(duration) + return f } -func (factory *MockBackupPolicyTemplateFactory) setScheduleField(setField func(schedulePolicy *appsv1alpha1.SchedulePolicy)) { - backupPolicy := factory.getLastBackupPolicy() - var schedulePolicy *appsv1alpha1.SchedulePolicy - switch factory.backupType { - case dataprotectionv1alpha1.BackupTypeSnapshot: - backupPolicy.Schedule.Snapshot = &appsv1alpha1.SchedulePolicy{} - schedulePolicy = backupPolicy.Schedule.Snapshot - case dataprotectionv1alpha1.BackupTypeDataFile: - backupPolicy.Schedule.Datafile = &appsv1alpha1.SchedulePolicy{} - schedulePolicy = backupPolicy.Schedule.Datafile - case dataprotectionv1alpha1.BackupTypeLogFile: - backupPolicy.Schedule.Logfile = &appsv1alpha1.SchedulePolicy{} - schedulePolicy = backupPolicy.Schedule.Logfile - } - if schedulePolicy == nil { +func (f *MockBackupPolicyTemplateFactory) setBackupPolicyField(setField func(backupPolicy *appsv1alpha1.BackupPolicy)) { + backupPolicy := f.getLastBackupPolicy() + if backupPolicy == nil { // ignore return } - setField(schedulePolicy) + setField(backupPolicy) } -func (factory *MockBackupPolicyTemplateFactory) AddSnapshotPolicy() *MockBackupPolicyTemplateFactory { - 
backupPolicy := factory.getLastBackupPolicy() - backupPolicy.Snapshot = &appsv1alpha1.SnapshotPolicy{ - Hooks: &appsv1alpha1.BackupPolicyHook{}, +func (f *MockBackupPolicyTemplateFactory) AddSchedule(method, schedule string, enable bool) *MockBackupPolicyTemplateFactory { + schedulePolicy := appsv1alpha1.SchedulePolicy{ + Enabled: &enable, + CronExpression: schedule, + BackupMethod: method, } - factory.backupType = dataprotectionv1alpha1.BackupTypeSnapshot - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) AddDatafilePolicy() *MockBackupPolicyTemplateFactory { - backupPolicy := factory.getLastBackupPolicy() - backupPolicy.Datafile = &appsv1alpha1.CommonBackupPolicy{} - factory.backupType = dataprotectionv1alpha1.BackupTypeDataFile - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) AddIncrementalPolicy() *MockBackupPolicyTemplateFactory { - backupPolicy := factory.getLastBackupPolicy() - backupPolicy.Logfile = &appsv1alpha1.CommonBackupPolicy{} - factory.backupType = dataprotectionv1alpha1.BackupTypeLogFile - return factory + backupPolicy := f.getLastBackupPolicy() + backupPolicy.Schedules = append(backupPolicy.Schedules, schedulePolicy) + return f } -func (factory *MockBackupPolicyTemplateFactory) SetHookContainerName(containerName string) *MockBackupPolicyTemplateFactory { - backupPolicy := factory.getLastBackupPolicy() - if backupPolicy.Snapshot == nil { - return factory - } - backupPolicy.Snapshot.Hooks.ContainerName = containerName - return factory +func (f *MockBackupPolicyTemplateFactory) AddBackupMethod(name string, + snapshotVolumes bool, actionSetName string) *MockBackupPolicyTemplateFactory { + backupPolicy := f.getLastBackupPolicy() + backupPolicy.BackupMethods = append(backupPolicy.BackupMethods, + dpv1alpha1.BackupMethod{ + Name: name, + SnapshotVolumes: &snapshotVolumes, + ActionSetName: actionSetName, + TargetVolumes: &dpv1alpha1.TargetVolumeInfo{}, + }) + return f } -func (factory 
*MockBackupPolicyTemplateFactory) AddHookPreCommand(preCommand string) *MockBackupPolicyTemplateFactory { - backupPolicy := factory.getLastBackupPolicy() - if backupPolicy.Snapshot == nil { - return factory - } - preCommands := &backupPolicy.Snapshot.Hooks.PreCommands - *preCommands = append(*preCommands, preCommand) - return factory +func (f *MockBackupPolicyTemplateFactory) SetBackupMethodVolumes(names []string) *MockBackupPolicyTemplateFactory { + backupMethod := f.getLastBackupMethod() + backupMethod.TargetVolumes.Volumes = names + return f } -func (factory *MockBackupPolicyTemplateFactory) AddHookPostCommand(postCommand string) *MockBackupPolicyTemplateFactory { - backupPolicy := factory.getLastBackupPolicy() - if backupPolicy.Snapshot == nil { - return factory +func (f *MockBackupPolicyTemplateFactory) SetBackupMethodVolumeMounts(keyAndValues ...string) *MockBackupPolicyTemplateFactory { + var volumeMounts []corev1.VolumeMount + for k, v := range WithMap(keyAndValues...) { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: k, + MountPath: v, + }) } - postCommands := &backupPolicy.Snapshot.Hooks.PostCommands - *postCommands = append(*postCommands, postCommand) - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) SetSchedule(schedule string, enable bool) *MockBackupPolicyTemplateFactory { - factory.setScheduleField(func(schedulePolicy *appsv1alpha1.SchedulePolicy) { - schedulePolicy.Enable = enable - schedulePolicy.CronExpression = schedule - }) - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) SetBackupsHistoryLimit(backupsHistoryLimit int32) *MockBackupPolicyTemplateFactory { - factory.setBasePolicyField(func(basePolicy *appsv1alpha1.BasePolicy) { - basePolicy.BackupsHistoryLimit = backupsHistoryLimit - }) - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) SetBackupToolName(backupToolName string) *MockBackupPolicyTemplateFactory { - factory.setCommonPolicyField(func(commonPolicy 
*appsv1alpha1.CommonBackupPolicy) { - commonPolicy.BackupToolName = backupToolName - }) - return factory -} - -func (factory *MockBackupPolicyTemplateFactory) SetTargetRole(role string) *MockBackupPolicyTemplateFactory { - factory.setBasePolicyField(func(basePolicy *appsv1alpha1.BasePolicy) { - basePolicy.Target.Role = role - }) - return factory + backupMethod := f.getLastBackupMethod() + backupMethod.TargetVolumes.VolumeMounts = volumeMounts + return f } -func (factory *MockBackupPolicyTemplateFactory) SetTargetAccount(account string) *MockBackupPolicyTemplateFactory { - factory.setBasePolicyField(func(basePolicy *appsv1alpha1.BasePolicy) { - basePolicy.Target.Account = account +func (f *MockBackupPolicyTemplateFactory) SetTargetRole(role string) *MockBackupPolicyTemplateFactory { + f.setBackupPolicyField(func(backupPolicy *appsv1alpha1.BackupPolicy) { + backupPolicy.Target.Role = role }) - return factory + return f } -func (factory *MockBackupPolicyTemplateFactory) SetLabels(labels map[string]string) *MockBackupPolicyTemplateFactory { - factory.get().SetLabels(labels) - return factory +func (f *MockBackupPolicyTemplateFactory) SetLabels(labels map[string]string) *MockBackupPolicyTemplateFactory { + f.Get().SetLabels(labels) + return f } diff --git a/internal/testutil/apps/base_factory.go b/internal/testutil/apps/base_factory.go index c1bff9d2751..8cc3aeb3d19 100644 --- a/internal/testutil/apps/base_factory.go +++ b/internal/testutil/apps/base_factory.go @@ -40,7 +40,7 @@ type BaseFactory[T intctrlutil.Object, PT intctrlutil.PObject[T], F any] struct concreteFactory *F } -func (factory *BaseFactory[T, PT, F]) init(namespace, name string, obj PT, f *F) { +func (factory *BaseFactory[T, PT, F]) Init(namespace, name string, obj PT, f *F) { obj.SetNamespace(namespace) obj.SetName(name) if obj.GetLabels() == nil { @@ -53,7 +53,7 @@ func (factory *BaseFactory[T, PT, F]) init(namespace, name string, obj PT, f *F) factory.concreteFactory = f } -func (factory 
*BaseFactory[T, PT, F]) get() PT { +func (factory *BaseFactory[T, PT, F]) Get() PT { return factory.object } @@ -89,7 +89,7 @@ func (factory *BaseFactory[T, PT, F]) AddAppComponentLabel(value string) *F { return factory.AddLabels(constant.KBAppComponentLabelKey, value) } -func (factory *BaseFactory[T, PT, F]) AddAppManangedByLabel() *F { +func (factory *BaseFactory[T, PT, F]) AddAppManagedByLabel() *F { return factory.AddLabels(constant.AppManagedByLabelKey, constant.AppName) } @@ -137,22 +137,24 @@ func (factory *BaseFactory[T, PT, F]) AddFinalizers(finalizers []string) *F { } func (factory *BaseFactory[T, PT, F]) Apply(changeFn func(PT)) *F { - changeFn(factory.object) + if changeFn != nil { + changeFn(factory.object) + } return factory.concreteFactory } func (factory *BaseFactory[T, PT, F]) Create(testCtx *testutil.TestContext) *F { - gomega.Expect(testCtx.CreateObj(testCtx.Ctx, factory.get())).Should(gomega.Succeed()) + gomega.Expect(testCtx.CreateObj(testCtx.Ctx, factory.Get())).Should(gomega.Succeed()) return factory.concreteFactory } func (factory *BaseFactory[T, PT, F]) CheckedCreate(testCtx *testutil.TestContext) *F { - gomega.Expect(testCtx.CheckedCreateObj(testCtx.Ctx, factory.get())).Should(gomega.Succeed()) + gomega.Expect(testCtx.CheckedCreateObj(testCtx.Ctx, factory.Get())).Should(gomega.Succeed()) return factory.concreteFactory } func (factory *BaseFactory[T, PT, F]) CreateCli(ctx context.Context, cli client.Client) *F { - gomega.Expect(cli.Create(ctx, factory.get())).Should(gomega.Succeed()) + gomega.Expect(cli.Create(ctx, factory.Get())).Should(gomega.Succeed()) return factory.concreteFactory } diff --git a/internal/testutil/apps/cluster_consensus_test_util.go b/internal/testutil/apps/cluster_consensus_test_util.go index 05e83a9fd6c..8adfa10e59d 100644 --- a/internal/testutil/apps/cluster_consensus_test_util.go +++ b/internal/testutil/apps/cluster_consensus_test_util.go @@ -120,11 +120,11 @@ func MockConsensusComponentStsPod( if sts != nil { 
stsUpdateRevision = sts.Status.UpdateRevision } - pod := NewPodFactory(testCtx.DefaultNamespace, podName). + podFactory := NewPodFactory(testCtx.DefaultNamespace, podName). SetOwnerReferences("apps/v1", constant.StatefulSetKind, sts). AddAppInstanceLabel(clusterName). AddAppComponentLabel(consensusCompName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). AddRoleLabel(podRole). AddConsensusSetAccessModeLabel(accessMode). AddControllerRevisionHashLabel(stsUpdateRevision). @@ -149,8 +149,11 @@ func MockConsensusComponentStsPod( }, }, }, - }). - CheckedCreate(testCtx).GetObject() + }) + if sts != nil && sts.Labels[constant.AppNameLabelKey] != "" { + podFactory.AddAppNameLabel(sts.Labels[constant.AppNameLabelKey]) + } + pod := podFactory.CheckedCreate(testCtx).GetObject() patch := client.MergeFrom(pod.DeepCopy()) pod.Status.Conditions = []corev1.PodCondition{ { diff --git a/internal/testutil/apps/cluster_factory.go b/internal/testutil/apps/cluster_factory.go index b7c578ad341..edea542bc4e 100644 --- a/internal/testutil/apps/cluster_factory.go +++ b/internal/testutil/apps/cluster_factory.go @@ -20,14 +20,9 @@ along with this program. If not, see . 
package apps import ( - "fmt" - "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" ) type MockClusterFactory struct { @@ -36,7 +31,7 @@ type MockClusterFactory struct { func NewClusterFactory(namespace, name, cdRef, cvRef string) *MockClusterFactory { f := &MockClusterFactory{} - f.init(namespace, name, + f.Init(namespace, name, &appsv1alpha1.Cluster{ Spec: appsv1alpha1.ClusterSpec{ ClusterDefRef: cdRef, @@ -49,17 +44,17 @@ func NewClusterFactory(namespace, name, cdRef, cvRef string) *MockClusterFactory } func (factory *MockClusterFactory) SetClusterAffinity(affinity *appsv1alpha1.Affinity) *MockClusterFactory { - factory.get().Spec.Affinity = affinity + factory.Get().Spec.Affinity = affinity return factory } func (factory *MockClusterFactory) AddClusterToleration(toleration corev1.Toleration) *MockClusterFactory { - tolerations := factory.get().Spec.Tolerations + tolerations := factory.Get().Spec.Tolerations if len(tolerations) == 0 { tolerations = []corev1.Toleration{} } tolerations = append(tolerations, toleration) - factory.get().Spec.Tolerations = tolerations + factory.Get().Spec.Tolerations = tolerations return factory } @@ -68,21 +63,21 @@ func (factory *MockClusterFactory) AddComponent(compName string, compDefName str Name: compName, ComponentDefRef: compDefName, } - factory.get().Spec.ComponentSpecs = append(factory.get().Spec.ComponentSpecs, comp) + factory.Get().Spec.ComponentSpecs = append(factory.Get().Spec.ComponentSpecs, comp) return factory } func (factory *MockClusterFactory) SetReplicas(replicas int32) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].Replicas = replicas } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory 
*MockClusterFactory) SetServiceAccountName(serviceAccountName string) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].ServiceAccountName = serviceAccountName } @@ -90,43 +85,43 @@ func (factory *MockClusterFactory) SetServiceAccountName(serviceAccountName stri } func (factory *MockClusterFactory) SetResources(resources corev1.ResourceRequirements) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].Resources = resources } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetComponentAffinity(affinity *appsv1alpha1.Affinity) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].Affinity = affinity } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetEnabledLogs(logName ...string) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].EnabledLogs = logName } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetClassDefRef(classDefRef *appsv1alpha1.ClassDefRef) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].ClassDefRef = classDefRef } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) AddComponentToleration(toleration corev1.Toleration) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := 
factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comp := comps[len(comps)-1] tolerations := comp.Tolerations @@ -137,13 +132,13 @@ func (factory *MockClusterFactory) AddComponentToleration(toleration corev1.Tole comp.Tolerations = tolerations comps[len(comps)-1] = comp } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) AddVolumeClaimTemplate(volumeName string, pvcSpec appsv1alpha1.PersistentVolumeClaimSpec) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comp := comps[len(comps)-1] comp.VolumeClaimTemplates = append(comp.VolumeClaimTemplates, @@ -153,48 +148,48 @@ func (factory *MockClusterFactory) AddVolumeClaimTemplate(volumeName string, }) comps[len(comps)-1] = comp } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetMonitor(monitor bool) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].Monitor = monitor } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetSwitchPolicy(switchPolicy *appsv1alpha1.ClusterSwitchPolicy) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].SwitchPolicy = switchPolicy } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetTLS(tls bool) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].TLS = tls } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return 
factory } func (factory *MockClusterFactory) SetIssuer(issuer *appsv1alpha1.Issuer) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].Issuer = issuer } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) AddService(serviceName string, serviceType corev1.ServiceType) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comp := comps[len(comps)-1] comp.Services = append(comp.Services, @@ -204,32 +199,20 @@ func (factory *MockClusterFactory) AddService(serviceName string, serviceType co }) comps[len(comps)-1] = comp } - factory.get().Spec.ComponentSpecs = comps - return factory -} - -func (factory *MockClusterFactory) AddRestorePointInTime(restoreTime metav1.Time, compNames, sourceCluster string) *MockClusterFactory { - annotations := factory.get().Annotations - if annotations == nil { - annotations = map[string]string{} - } - annotations[constant.RestoreFromTimeAnnotationKey] = fmt.Sprintf(`{"%s":"%s"}`, compNames, restoreTime.Format(time.RFC3339)) - annotations[constant.RestoreFromSrcClusterAnnotationKey] = sourceCluster - - factory.get().Annotations = annotations + factory.Get().Spec.ComponentSpecs = comps return factory } func (factory *MockClusterFactory) SetBackup(backup *appsv1alpha1.ClusterBackup) *MockClusterFactory { - factory.get().Spec.Backup = backup + factory.Get().Spec.Backup = backup return factory } func (factory *MockClusterFactory) SetServiceRefs(serviceRefs []appsv1alpha1.ServiceRef) *MockClusterFactory { - comps := factory.get().Spec.ComponentSpecs + comps := factory.Get().Spec.ComponentSpecs if len(comps) > 0 { comps[len(comps)-1].ServiceRefs = serviceRefs } - factory.get().Spec.ComponentSpecs = comps + factory.Get().Spec.ComponentSpecs = comps return factory } diff --git 
a/internal/testutil/apps/cluster_replication_test_util.go b/internal/testutil/apps/cluster_replication_test_util.go index 51e5f835191..95e81429595 100644 --- a/internal/testutil/apps/cluster_replication_test_util.go +++ b/internal/testutil/apps/cluster_replication_test_util.go @@ -45,7 +45,7 @@ func MockReplicationComponentPod( SetOwnerReferences("apps/v1", constant.StatefulSetKind, sts). AddAppInstanceLabel(clusterName). AddAppComponentLabel(compName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). AddRoleLabel(roleName). AddControllerRevisionHashLabel(sts.Status.UpdateRevision). AddContainer(corev1.Container{Name: DefaultRedisContainerName, Image: DefaultRedisImageName}). diff --git a/internal/testutil/apps/cluster_stateless_test_util.go b/internal/testutil/apps/cluster_stateless_test_util.go index 45f8c0fcefe..69e7ce76476 100644 --- a/internal/testutil/apps/cluster_stateless_test_util.go +++ b/internal/testutil/apps/cluster_stateless_test_util.go @@ -50,7 +50,7 @@ func MockStatelessPod(testCtx *testutil.TestContext, deploy *appsv1.Deployment, SetOwnerReferences("apps/v1", constant.ReplicaSetKind, newRs). AddAppInstanceLabel(clusterName). AddAppComponentLabel(componentName). - AddAppManangedByLabel(). + AddAppManagedByLabel(). AddContainer(corev1.Container{Name: DefaultNginxContainerName, Image: NginxImage}). 
Create(testCtx).GetObject() } diff --git a/internal/testutil/apps/clusterdef_factory.go b/internal/testutil/apps/clusterdef_factory.go index 828c9e71d21..b68e0dcf7cc 100644 --- a/internal/testutil/apps/clusterdef_factory.go +++ b/internal/testutil/apps/clusterdef_factory.go @@ -40,7 +40,7 @@ type MockClusterDefFactory struct { func NewClusterDefFactory(name string) *MockClusterDefFactory { f := &MockClusterDefFactory{} - f.init("", name, + f.Init("", name, &appsv1alpha1.ClusterDefinition{ Spec: appsv1alpha1.ClusterDefinitionSpec{ ComponentDefs: []appsv1alpha1.ClusterComponentDefinition{}, @@ -69,7 +69,7 @@ func (factory *MockClusterDefFactory) AddComponentDef(tplType ComponentDefTplTyp case StatelessNginxComponent: component = &statelessNginxComponent } - factory.get().Spec.ComponentDefs = append(factory.get().Spec.ComponentDefs, *component) + factory.Get().Spec.ComponentDefs = append(factory.Get().Spec.ComponentDefs, *component) comp := factory.getLastCompDef() comp.Name = compDefName return factory @@ -165,24 +165,24 @@ func (factory *MockClusterDefFactory) AddHorizontalScalePolicy(policy appsv1alph func (factory *MockClusterDefFactory) SetConnectionCredential( connectionCredential map[string]string, svc *appsv1alpha1.ServiceSpec) *MockClusterDefFactory { - factory.get().Spec.ConnectionCredential = connectionCredential + factory.Get().Spec.ConnectionCredential = connectionCredential factory.SetServiceSpec(svc) return factory } func (factory *MockClusterDefFactory) get1stCompDef() *appsv1alpha1.ClusterComponentDefinition { - if len(factory.get().Spec.ComponentDefs) == 0 { + if len(factory.Get().Spec.ComponentDefs) == 0 { return nil } - return &factory.get().Spec.ComponentDefs[0] + return &factory.Get().Spec.ComponentDefs[0] } func (factory *MockClusterDefFactory) getLastCompDef() *appsv1alpha1.ClusterComponentDefinition { - l := len(factory.get().Spec.ComponentDefs) + l := len(factory.Get().Spec.ComponentDefs) if l == 0 { return nil } - comps := 
factory.get().Spec.ComponentDefs + comps := factory.Get().Spec.ComponentDefs return &comps[l-1] } diff --git a/internal/testutil/apps/clusterversion_factory.go b/internal/testutil/apps/clusterversion_factory.go index 3ed7d3052d5..f1ed12cb603 100644 --- a/internal/testutil/apps/clusterversion_factory.go +++ b/internal/testutil/apps/clusterversion_factory.go @@ -31,7 +31,7 @@ type MockClusterVersionFactory struct { func NewClusterVersionFactory(name, cdRef string) *MockClusterVersionFactory { f := &MockClusterVersionFactory{} - f.init("", name, + f.Init("", name, &appsv1alpha1.ClusterVersion{ Spec: appsv1alpha1.ClusterVersionSpec{ ClusterDefinitionRef: cdRef, @@ -45,18 +45,18 @@ func (factory *MockClusterVersionFactory) AddComponentVersion(compDefName string comp := appsv1alpha1.ClusterComponentVersion{ ComponentDefRef: compDefName, } - factory.get().Spec.ComponentVersions = append(factory.get().Spec.ComponentVersions, comp) + factory.Get().Spec.ComponentVersions = append(factory.Get().Spec.ComponentVersions, comp) return factory } func (factory *MockClusterVersionFactory) AddInitContainer(container corev1.Container) *MockClusterVersionFactory { - comps := factory.get().Spec.ComponentVersions + comps := factory.Get().Spec.ComponentVersions if len(comps) > 0 { comp := comps[len(comps)-1] comp.VersionsCtx.InitContainers = append(comp.VersionsCtx.InitContainers, container) comps[len(comps)-1] = comp } - factory.get().Spec.ComponentVersions = comps + factory.Get().Spec.ComponentVersions = comps return factory } @@ -68,13 +68,13 @@ func (factory *MockClusterVersionFactory) AddInitContainerShort(name string, ima } func (factory *MockClusterVersionFactory) AddContainer(container corev1.Container) *MockClusterVersionFactory { - comps := factory.get().Spec.ComponentVersions + comps := factory.Get().Spec.ComponentVersions if len(comps) > 0 { comp := comps[len(comps)-1] comp.VersionsCtx.Containers = append(comp.VersionsCtx.Containers, container) comps[len(comps)-1] = comp } - 
factory.get().Spec.ComponentVersions = comps + factory.Get().Spec.ComponentVersions = comps return factory } @@ -87,7 +87,7 @@ func (factory *MockClusterVersionFactory) AddContainerShort(name string, image s func (factory *MockClusterVersionFactory) AddConfigTemplate(name string, configTemplateRef string, configConstraintRef string, volumeName string) *MockClusterVersionFactory { - comps := factory.get().Spec.ComponentVersions + comps := factory.Get().Spec.ComponentVersions if len(comps) > 0 { comp := comps[len(comps)-1] comp.ConfigSpecs = append(comp.ConfigSpecs, @@ -101,6 +101,6 @@ func (factory *MockClusterVersionFactory) AddConfigTemplate(name string, }) comps[len(comps)-1] = comp } - factory.get().Spec.ComponentVersions = comps + factory.Get().Spec.ComponentVersions = comps return factory } diff --git a/internal/testutil/apps/common_util.go b/internal/testutil/apps/common_util.go index 184a164bad7..e29d360c111 100644 --- a/internal/testutil/apps/common_util.go +++ b/internal/testutil/apps/common_util.go @@ -77,7 +77,7 @@ func ChangeObjStatus[T intctrlutil.Object, PT intctrlutil.PObject[T]](testCtx *t return testCtx.Cli.Status().Patch(testCtx.Ctx, pobj, patch) } -// Helper functions to get object, change its fields in input closure and update it. +// Helper functions to get object, change its fields in input closure and update it. // Each helper is a wrapper of client.Get and client.Patch. // Each helper returns a Gomega assertion function, which should be passed into // Eventually() or Consistently() as the first parameter. 
diff --git a/internal/testutil/apps/componentclassdefinition_factory.go b/internal/testutil/apps/componentclassdefinition_factory.go index 1bfdfb322f1..71ab1f4122f 100644 --- a/internal/testutil/apps/componentclassdefinition_factory.go +++ b/internal/testutil/apps/componentclassdefinition_factory.go @@ -32,7 +32,7 @@ type MockComponentClassDefinitionFactory struct { func NewComponentClassDefinitionFactory(name, clusterDefinitionRef, componentType string) *MockComponentClassDefinitionFactory { f := &MockComponentClassDefinitionFactory{} - f.init("", name, &appsv1alpha1.ComponentClassDefinition{ + f.Init("", name, &appsv1alpha1.ComponentClassDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ @@ -46,7 +46,7 @@ func NewComponentClassDefinitionFactory(name, clusterDefinitionRef, componentTyp } func (factory *MockComponentClassDefinitionFactory) AddClasses(classes []appsv1alpha1.ComponentClass) *MockComponentClassDefinitionFactory { - groups := factory.get().Spec.Groups + groups := factory.Get().Spec.Groups groups = append(groups, appsv1alpha1.ComponentClassGroup{ Series: []appsv1alpha1.ComponentClassSeries{ { @@ -54,6 +54,6 @@ func (factory *MockComponentClassDefinitionFactory) AddClasses(classes []appsv1a }, }, }) - factory.get().Spec.Groups = groups + factory.Get().Spec.Groups = groups return factory } diff --git a/internal/testutil/apps/componentresourceconstraint_factory.go b/internal/testutil/apps/componentresourceconstraint_factory.go index ca5b9498fb5..e3501a3eafa 100644 --- a/internal/testutil/apps/componentresourceconstraint_factory.go +++ b/internal/testutil/apps/componentresourceconstraint_factory.go @@ -95,7 +95,7 @@ type MockComponentResourceConstraintFactory struct { func NewComponentResourceConstraintFactory(name string) *MockComponentResourceConstraintFactory { f := &MockComponentResourceConstraintFactory{} - f.init("", name, &appsv1alpha1.ComponentResourceConstraint{ + f.Init("", name, 
&appsv1alpha1.ComponentResourceConstraint{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ @@ -110,7 +110,7 @@ func (factory *MockComponentResourceConstraintFactory) AddConstraints(constraint var ( tpl string newConstraints []appsv1alpha1.ResourceConstraintRule - constraints = factory.get().Spec.Rules + constraints = factory.Get().Spec.Rules ) switch constraintTplType { case GeneralResourceConstraint: @@ -124,11 +124,11 @@ func (factory *MockComponentResourceConstraintFactory) AddConstraints(constraint panic(err) } constraints = append(constraints, newConstraints...) - factory.get().Spec.Rules = constraints + factory.Get().Spec.Rules = constraints return factory } func (factory *MockComponentResourceConstraintFactory) AddSelector(selector appsv1alpha1.ClusterResourceConstraintSelector) *MockComponentResourceConstraintFactory { - factory.get().Spec.Selector = append(factory.get().Spec.Selector, selector) + factory.Get().Spec.Selector = append(factory.Get().Spec.Selector, selector) return factory } diff --git a/internal/testutil/apps/constant.go b/internal/testutil/apps/constant.go index 943105647dc..584f870a740 100644 --- a/internal/testutil/apps/constant.go +++ b/internal/testutil/apps/constant.go @@ -54,7 +54,7 @@ const ( DefaultRedisCompSpecName = "redis-rsts" DefaultRedisImageName = "redis:7.0.5" DefaultRedisContainerName = "redis" - DefaultRedisInitContainerName = "redis-init-container" + DefaultRedisInitContainerName = "redis-init-container" Class1c1gName = "general-1c1g" Class2c4gName = "general-2c4g" @@ -271,7 +271,7 @@ var ( Image: DefaultRedisImageName, ImagePullPolicy: corev1.PullIfNotPresent, VolumeMounts: defaultReplicationRedisVolumeMounts, - Command: []string{"/scripts/init.sh"}, + Command: []string{"/scripts/init.sh"}, Resources: zeroResRequirements, } diff --git a/internal/testutil/apps/deployment_factoy.go b/internal/testutil/apps/deployment_factoy.go index 75bcdf7e8aa..5d5abcca007 100644 --- 
a/internal/testutil/apps/deployment_factoy.go +++ b/internal/testutil/apps/deployment_factoy.go @@ -33,7 +33,7 @@ type MockDeploymentFactory struct { func NewDeploymentFactory(namespace, name, clusterName, componentName string) *MockDeploymentFactory { f := &MockDeploymentFactory{} - f.init(namespace, name, + f.Init(namespace, name, &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -65,17 +65,17 @@ func NewDeploymentFactory(namespace, name, clusterName, componentName string) *M } func (factory *MockDeploymentFactory) SetMinReadySeconds(minReadySeconds int32) *MockDeploymentFactory { - factory.get().Spec.MinReadySeconds = minReadySeconds + factory.Get().Spec.MinReadySeconds = minReadySeconds return factory } func (factory *MockDeploymentFactory) SetReplicas(replicas int32) *MockDeploymentFactory { - factory.get().Spec.Replicas = &replicas + factory.Get().Spec.Replicas = &replicas return factory } func (factory *MockDeploymentFactory) AddVolume(volume corev1.Volume) *MockDeploymentFactory { - volumes := &factory.get().Spec.Template.Spec.Volumes + volumes := &factory.Get().Spec.Template.Spec.Volumes *volumes = append(*volumes, volume) return factory } @@ -94,7 +94,7 @@ func (factory *MockDeploymentFactory) AddConfigmapVolume(volumeName, configmapNa } func (factory *MockDeploymentFactory) AddContainer(container corev1.Container) *MockDeploymentFactory { - containers := &factory.get().Spec.Template.Spec.Containers + containers := &factory.Get().Spec.Template.Spec.Containers *containers = append(*containers, container) return factory } diff --git a/internal/testutil/apps/pod_factory.go b/internal/testutil/apps/pod_factory.go index 2054fc4452d..4626a68add1 100644 --- a/internal/testutil/apps/pod_factory.go +++ b/internal/testutil/apps/pod_factory.go @@ -29,7 +29,7 @@ type MockPodFactory struct { func NewPodFactory(namespace, name string) *MockPodFactory { f := &MockPodFactory{} - f.init(namespace, name, + f.Init(namespace, name, &corev1.Pod{ 
Spec: corev1.PodSpec{ Containers: []corev1.Container{}, @@ -39,13 +39,13 @@ func NewPodFactory(namespace, name string) *MockPodFactory { } func (factory *MockPodFactory) AddContainer(container corev1.Container) *MockPodFactory { - containers := &factory.get().Spec.Containers + containers := &factory.Get().Spec.Containers *containers = append(*containers, container) return factory } func (factory *MockPodFactory) AddVolume(volume corev1.Volume) *MockPodFactory { - volumes := &factory.get().Spec.Volumes + volumes := &factory.Get().Spec.Volumes if volumes == nil { volumes = &[]corev1.Volume{} } @@ -54,6 +54,6 @@ func (factory *MockPodFactory) AddVolume(volume corev1.Volume) *MockPodFactory { } func (factory *MockPodFactory) AddNodeName(nodeName string) *MockPodFactory { - factory.get().Spec.NodeName = nodeName + factory.Get().Spec.NodeName = nodeName return factory } diff --git a/internal/testutil/apps/pvc_factoy.go b/internal/testutil/apps/pvc_factoy.go index 3e32cf06b2b..8016eac4003 100644 --- a/internal/testutil/apps/pvc_factoy.go +++ b/internal/testutil/apps/pvc_factoy.go @@ -35,7 +35,7 @@ type MockPersistentVolumeClaimFactory struct { func NewPersistentVolumeClaimFactory(namespace, name, clusterName, componentName, vctName string) *MockPersistentVolumeClaimFactory { f := &MockPersistentVolumeClaimFactory{} volumeMode := corev1.PersistentVolumeFilesystem - f.init(namespace, name, + f.Init(namespace, name, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -58,12 +58,12 @@ func NewPersistentVolumeClaimFactory(namespace, name, clusterName, componentName } func (factory *MockPersistentVolumeClaimFactory) SetStorageClass(storageClassName string) *MockPersistentVolumeClaimFactory { - factory.get().Spec.StorageClassName = &storageClassName + factory.Get().Spec.StorageClassName = &storageClassName return factory } func (factory *MockPersistentVolumeClaimFactory) SetStorage(storageSize string) *MockPersistentVolumeClaimFactory { - 
factory.get().Spec.Resources = corev1.ResourceRequirements{ + factory.Get().Spec.Resources = corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse(storageSize), }, @@ -72,6 +72,6 @@ func (factory *MockPersistentVolumeClaimFactory) SetStorage(storageSize string) } func (factory *MockPersistentVolumeClaimFactory) SetAnnotations(annotations map[string]string) *MockPersistentVolumeClaimFactory { - factory.get().Annotations = annotations + factory.Get().Annotations = annotations return factory } diff --git a/internal/testutil/apps/restorejob_factory.go b/internal/testutil/apps/restorejob_factory.go deleted file mode 100644 index 1c46ab9b53c..00000000000 --- a/internal/testutil/apps/restorejob_factory.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package apps - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" -) - -type MockRestoreJobFactory struct { - BaseFactory[dataprotectionv1alpha1.RestoreJob, *dataprotectionv1alpha1.RestoreJob, MockRestoreJobFactory] -} - -func NewRestoreJobFactory(namespace, name string) *MockRestoreJobFactory { - f := &MockRestoreJobFactory{} - f.init(namespace, name, - &dataprotectionv1alpha1.RestoreJob{ - Spec: dataprotectionv1alpha1.RestoreJobSpec{ - Target: dataprotectionv1alpha1.TargetCluster{ - LabelsSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{}, - }, - }, - }, - }, f) - return f -} - -func (factory *MockRestoreJobFactory) SetBackupJobName(backupJobName string) *MockRestoreJobFactory { - factory.get().Spec.BackupJobName = backupJobName - return factory -} - -func (factory *MockRestoreJobFactory) AddTargetMatchLabels(keyAndValues ...string) *MockRestoreJobFactory { - for k, v := range WithMap(keyAndValues...) 
{ - factory.get().Spec.Target.LabelsSelector.MatchLabels[k] = v - } - return factory -} - -func (factory *MockRestoreJobFactory) SetTargetSecretName(name string) *MockRestoreJobFactory { - factory.get().Spec.Target.Secret = &dataprotectionv1alpha1.BackupPolicySecret{Name: name} - return factory -} - -func (factory *MockRestoreJobFactory) AddTargetVolume(volume corev1.Volume) *MockRestoreJobFactory { - factory.get().Spec.TargetVolumes = append(factory.get().Spec.TargetVolumes, volume) - return factory -} - -func (factory *MockRestoreJobFactory) AddTargetVolumePVC(volumeName, pvcName string) *MockRestoreJobFactory { - volume := corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }, - } - factory.AddTargetVolume(volume) - return factory -} - -func (factory *MockRestoreJobFactory) AddTargetVolumeMount(volumeMount corev1.VolumeMount) *MockRestoreJobFactory { - factory.get().Spec.TargetVolumeMounts = append(factory.get().Spec.TargetVolumeMounts, volumeMount) - return factory -} diff --git a/internal/testutil/apps/rsm_factoy.go b/internal/testutil/apps/rsm_factoy.go index e7bdee2c6be..6b9e2e11a1f 100644 --- a/internal/testutil/apps/rsm_factoy.go +++ b/internal/testutil/apps/rsm_factoy.go @@ -34,7 +34,7 @@ type MockRSMFactory struct { func NewRSMFactory(namespace, name string, clusterName string, componentName string) *MockRSMFactory { f := &MockRSMFactory{} - f.init(namespace, name, + f.Init(namespace, name, &workloads.ReplicatedStateMachine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -69,12 +69,12 @@ func NewRSMFactory(namespace, name string, clusterName string, componentName str } func (factory *MockRSMFactory) SetReplicas(replicas int32) *MockRSMFactory { - factory.get().Spec.Replicas = &replicas + factory.Get().Spec.Replicas = &replicas return factory } func (factory *MockRSMFactory) AddVolume(volume corev1.Volume) *MockRSMFactory { - 
volumes := &factory.get().Spec.Template.Spec.Volumes + volumes := &factory.Get().Spec.Template.Spec.Volumes *volumes = append(*volumes, volume) return factory } @@ -93,13 +93,13 @@ func (factory *MockRSMFactory) AddConfigmapVolume(volumeName string, configmapNa } func (factory *MockRSMFactory) AddVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) *MockRSMFactory { - volumeClaimTpls := &factory.get().Spec.VolumeClaimTemplates + volumeClaimTpls := &factory.Get().Spec.VolumeClaimTemplates *volumeClaimTpls = append(*volumeClaimTpls, pvc) return factory } func (factory *MockRSMFactory) AddContainer(container corev1.Container) *MockRSMFactory { - containers := &factory.get().Spec.Template.Spec.Containers + containers := &factory.Get().Spec.Template.Spec.Containers *containers = append(*containers, container) return factory } diff --git a/internal/testutil/apps/servicedescriptor_factory.go b/internal/testutil/apps/servicedescriptor_factory.go index da482f4f38e..10bf2f28ea1 100644 --- a/internal/testutil/apps/servicedescriptor_factory.go +++ b/internal/testutil/apps/servicedescriptor_factory.go @@ -32,7 +32,7 @@ type MockServiceDescriptorFactory struct { func NewServiceDescriptorFactory(namespace, name string) *MockServiceDescriptorFactory { f := &MockServiceDescriptorFactory{} - f.init(namespace, name, + f.Init(namespace, name, &appsv1alpha1.ServiceDescriptor{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -46,26 +46,26 @@ func NewServiceDescriptorFactory(namespace, name string) *MockServiceDescriptorF } func (factory *MockServiceDescriptorFactory) SetServiceKind(serviceKind string) *MockServiceDescriptorFactory { - factory.get().Spec.ServiceKind = serviceKind + factory.Get().Spec.ServiceKind = serviceKind return factory } func (factory *MockServiceDescriptorFactory) SetServiceVersion(serviceVersion string) *MockServiceDescriptorFactory { - factory.get().Spec.ServiceVersion = serviceVersion + factory.Get().Spec.ServiceVersion = serviceVersion return factory } func 
(factory *MockServiceDescriptorFactory) SetEndpoint(endpoint appsv1alpha1.CredentialVar) *MockServiceDescriptorFactory { - factory.get().Spec.Endpoint = &endpoint + factory.Get().Spec.Endpoint = &endpoint return factory } func (factory *MockServiceDescriptorFactory) SetPort(port appsv1alpha1.CredentialVar) *MockServiceDescriptorFactory { - factory.get().Spec.Port = &port + factory.Get().Spec.Port = &port return factory } func (factory *MockServiceDescriptorFactory) SetAuth(auth appsv1alpha1.ConnectionCredentialAuth) *MockServiceDescriptorFactory { - factory.get().Spec.Auth = &auth + factory.Get().Spec.Auth = &auth return factory } diff --git a/internal/testutil/apps/statefulset_factoy.go b/internal/testutil/apps/statefulset_factoy.go index 18366814f88..d94e64e0c98 100644 --- a/internal/testutil/apps/statefulset_factoy.go +++ b/internal/testutil/apps/statefulset_factoy.go @@ -33,7 +33,7 @@ type MockStatefulSetFactory struct { func NewStatefulSetFactory(namespace, name string, clusterName string, componentName string) *MockStatefulSetFactory { f := &MockStatefulSetFactory{} - f.init(namespace, name, + f.Init(namespace, name, &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -68,12 +68,12 @@ func NewStatefulSetFactory(namespace, name string, clusterName string, component } func (factory *MockStatefulSetFactory) SetReplicas(replicas int32) *MockStatefulSetFactory { - factory.get().Spec.Replicas = &replicas + factory.Get().Spec.Replicas = &replicas return factory } func (factory *MockStatefulSetFactory) AddVolume(volume corev1.Volume) *MockStatefulSetFactory { - volumes := &factory.get().Spec.Template.Spec.Volumes + volumes := &factory.Get().Spec.Template.Spec.Volumes *volumes = append(*volumes, volume) return factory } @@ -92,13 +92,13 @@ func (factory *MockStatefulSetFactory) AddConfigmapVolume(volumeName string, con } func (factory *MockStatefulSetFactory) AddVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) *MockStatefulSetFactory 
{ - volumeClaimTpls := &factory.get().Spec.VolumeClaimTemplates + volumeClaimTpls := &factory.Get().Spec.VolumeClaimTemplates *volumeClaimTpls = append(*volumeClaimTpls, pvc) return factory } func (factory *MockStatefulSetFactory) AddContainer(container corev1.Container) *MockStatefulSetFactory { - containers := &factory.get().Spec.Template.Spec.Containers + containers := &factory.Get().Spec.Template.Spec.Containers *containers = append(*containers, container) return factory } diff --git a/internal/testutil/dataprotection/backup_factory.go b/internal/testutil/dataprotection/backup_factory.go new file mode 100644 index 00000000000..8bab1ef48b8 --- /dev/null +++ b/internal/testutil/dataprotection/backup_factory.go @@ -0,0 +1,68 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +type MockBackupFactory struct { + testapps.BaseFactory[dpv1alpha1.Backup, *dpv1alpha1.Backup, MockBackupFactory] +} + +func NewBackupFactory(namespace, name string) *MockBackupFactory { + f := &MockBackupFactory{} + f.Init(namespace, name, + &dpv1alpha1.Backup{ + Spec: dpv1alpha1.BackupSpec{}, + }, f) + return f +} + +func (f *MockBackupFactory) SetBackupPolicyName(backupPolicyName string) *MockBackupFactory { + f.Get().Spec.BackupPolicyName = backupPolicyName + return f +} + +func (f *MockBackupFactory) SetBackupMethod(backupMethod string) *MockBackupFactory { + f.Get().Spec.BackupMethod = backupMethod + return f +} + +func (f *MockBackupFactory) SetLabels(labels map[string]string) *MockBackupFactory { + f.Get().SetLabels(labels) + return f +} + +func (f *MockBackupFactory) SetBackupTimeRange(startTime, stopTime time.Time) *MockBackupFactory { + tr := f.Get().Status.TimeRange + if tr == nil { + tr = &dpv1alpha1.BackupTimeRange{} + } + tr.Start = &metav1.Time{Time: startTime} + tr.End = &metav1.Time{Time: stopTime} + f.Get().Status.TimeRange = tr + return f +} diff --git a/internal/testutil/dataprotection/backup_utils.go b/internal/testutil/dataprotection/backup_utils.go new file mode 100644 index 00000000000..8dc770b98e3 --- /dev/null +++ b/internal/testutil/dataprotection/backup_utils.go @@ -0,0 +1,223 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + "github.com/apecloud/kubeblocks/internal/dataprotection/utils/boolptr" + "github.com/apecloud/kubeblocks/internal/testutil" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +func NewFakeActionSet(testCtx *testutil.TestContext) *dpv1alpha1.ActionSet { + as := testapps.CreateCustomizedObj(testCtx, "backup/actionset.yaml", + &dpv1alpha1.ActionSet{}, testapps.WithName(ActionSetName)) + Eventually(testapps.CheckObj(testCtx, client.ObjectKeyFromObject(as), + func(g Gomega, as *dpv1alpha1.ActionSet) { + g.Expect(as.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.AvailablePhase)) + })).Should(Succeed()) + return as +} + +func NewFakeBackupPolicy(testCtx *testutil.TestContext, + change func(backupPolicy *dpv1alpha1.BackupPolicy)) *dpv1alpha1.BackupPolicy { + bp := NewBackupPolicyFactory(testCtx.DefaultNamespace, BackupPolicyName). + SetBackupRepoName(BackupRepoName). + SetTarget(constant.AppInstanceLabelKey, ClusterName, + constant.KBAppComponentLabelKey, ComponentName, + constant.RoleLabelKey, constant.Leader). + SetPathPrefix(BackupPathPrefix). + SetTargetConnectionCredential(ClusterName). + AddBackupMethod(BackupMethodName, false, ActionSetName). 
+ SetBackupMethodVolumeMounts(DataVolumeName, DataVolumeMountPath, + LogVolumeName, LogVolumeMountPath). + AddBackupMethod(VSBackupMethodName, true, ""). + SetBackupMethodVolumes([]string{DataVolumeName}). + Apply(change). + Create(testCtx).GetObject() + Eventually(testapps.CheckObj(testCtx, client.ObjectKeyFromObject(bp), + func(g Gomega, bp *dpv1alpha1.BackupPolicy) { + g.Expect(bp.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.AvailablePhase)) + })).Should(Succeed()) + return bp +} + +func NewFakeStorageProvider(testCtx *testutil.TestContext, + change func(sp *storagev1alpha1.StorageProvider)) *storagev1alpha1.StorageProvider { + sp := testapps.CreateCustomizedObj(testCtx, "backup/storageprovider.yaml", + &storagev1alpha1.StorageProvider{}, func(obj *storagev1alpha1.StorageProvider) { + obj.Name = StorageProviderName + if change != nil { + change(obj) + } + }) + // the storage provider controller is not running, so set the status manually + Expect(testapps.ChangeObjStatus(testCtx, sp, func() { + sp.Status.Phase = storagev1alpha1.StorageProviderReady + })).Should(Succeed()) + return sp +} + +func NewFakeBackupRepo(testCtx *testutil.TestContext, + change func(repo *dpv1alpha1.BackupRepo)) (*dpv1alpha1.BackupRepo, string) { + repo := testapps.CreateCustomizedObj(testCtx, "backup/backuprepo.yaml", + &dpv1alpha1.BackupRepo{}, func(obj *dpv1alpha1.BackupRepo) { + obj.Name = BackupRepoName + obj.Spec.StorageProviderRef = StorageProviderName + if change != nil { + change(obj) + } + }) + var name string + Eventually(testapps.CheckObj(testCtx, client.ObjectKeyFromObject(repo), + func(g Gomega, repo *dpv1alpha1.BackupRepo) { + g.Expect(repo.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.BackupRepoReady)) + g.Expect(repo.Status.BackupPVCName).ShouldNot(BeEmpty()) + name = repo.Status.BackupPVCName + })).Should(Succeed()) + return repo, name +} + +func NewFakeBackup(testCtx *testutil.TestContext, + change func(backup *dpv1alpha1.Backup)) *dpv1alpha1.Backup { + if change == 
nil { + change = func(*dpv1alpha1.Backup) {} // set nop + } + backup := NewBackupFactory(testCtx.DefaultNamespace, BackupName). + SetBackupPolicyName(BackupPolicyName). + SetBackupMethod(BackupMethodName). + Apply(change). + Create(testCtx).GetObject() + return backup +} + +func NewFakeCluster(testCtx *testutil.TestContext) *BackupClusterInfo { + createPVC := func(name string) *corev1.PersistentVolumeClaim { + return testapps.NewPersistentVolumeClaimFactory( + testCtx.DefaultNamespace, name, ClusterName, ComponentName, "data"). + SetStorage("1Gi"). + SetStorageClass(StorageClassName). + Create(testCtx).GetObject() + } + + podFactory := func(name string) *testapps.MockPodFactory { + return testapps.NewPodFactory(testCtx.DefaultNamespace, name). + AddAppInstanceLabel(ClusterName). + AddAppComponentLabel(ComponentName). + AddContainer(corev1.Container{Name: ContainerName, Image: testapps.ApeCloudMySQLImage}) + } + + By("mocking a cluster") + cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, ClusterName, + "test-cd", "test-cv").Create(testCtx).GetObject() + podName := ClusterName + "-" + ComponentName + + By("mocking a storage class") + _ = testapps.CreateStorageClass(testCtx, StorageClassName, true) + + By("mocking a pvc belonging to the pod 0") + pvc := createPVC("data-" + podName + "-0") + + By("mocking a pvc belonging to the pod 1") + pvc1 := createPVC("data-" + podName + "-1") + + By("mocking pod 0 belonging to the statefulset") + volume := corev1.Volume{Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name}}} + pod := podFactory(podName + "-0"). + AddRoleLabel("leader"). + AddVolume(volume). 
+ Create(testCtx).GetObject() + + By("mocking pod 1 belonging to the statefulset") + volume2 := corev1.Volume{Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc1.Name}}} + _ = podFactory(podName + "-1"). + AddVolume(volume2). + Create(testCtx).GetObject() + + return &BackupClusterInfo{ + Cluster: cluster, + TargetPod: pod, + TargetPVC: pvc.Name, + } +} + +func NewFakeBackupSchedule(testCtx *testutil.TestContext, + change func(schedule *dpv1alpha1.BackupSchedule)) *dpv1alpha1.BackupSchedule { + schedule := NewBackupScheduleFactory(testCtx.DefaultNamespace, BackupScheduleName). + SetBackupPolicyName(BackupPolicyName). + SetStartingDeadlineMinutes(StartingDeadlineMinutes). + AddSchedulePolicy(dpv1alpha1.SchedulePolicy{ + Enabled: boolptr.False(), + BackupMethod: BackupMethodName, + CronExpression: BackupScheduleCron, + RetentionPeriod: BackupRetention, + }). + AddSchedulePolicy(dpv1alpha1.SchedulePolicy{ + Enabled: boolptr.False(), + BackupMethod: VSBackupMethodName, + CronExpression: BackupScheduleCron, + RetentionPeriod: BackupRetention, + }). + Apply(change). + Create(testCtx).GetObject() + return schedule +} + +// EnableBackupSchedule enables the backup schedule that matches the given method. 
+func EnableBackupSchedule(testCtx *testutil.TestContext, + backupSchedule *dpv1alpha1.BackupSchedule, method string) { + Eventually(testapps.ChangeObj(testCtx, backupSchedule, func(schedule *dpv1alpha1.BackupSchedule) { + for i := range schedule.Spec.Schedules { + if schedule.Spec.Schedules[i].BackupMethod == method { + schedule.Spec.Schedules[i].Enabled = boolptr.True() + break + } + } + })).Should(Succeed()) +} + +func MockBackupStatusMethod(backup *dpv1alpha1.Backup, targetVolume string) { + snapshot := utils.VolumeSnapshotEnabled() + backupMethod := BackupMethodName + if snapshot { + backupMethod = VSBackupMethodName + } + backup.Status.BackupMethod = &dpv1alpha1.BackupMethod{ + Name: backupMethod, + SnapshotVolumes: &snapshot, + TargetVolumes: &dpv1alpha1.TargetVolumeInfo{ + Volumes: []string{targetVolume}, + VolumeMounts: []corev1.VolumeMount{ + {Name: targetVolume, MountPath: "/"}, + }, + }, + } +} diff --git a/internal/testutil/dataprotection/backuppolicy_factory.go b/internal/testutil/dataprotection/backuppolicy_factory.go new file mode 100644 index 00000000000..f97b2e8a6bc --- /dev/null +++ b/internal/testutil/dataprotection/backuppolicy_factory.go @@ -0,0 +1,106 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +type MockBackupPolicyFactory struct { + testapps.BaseFactory[dpv1alpha1.BackupPolicy, *dpv1alpha1.BackupPolicy, MockBackupPolicyFactory] +} + +func NewBackupPolicyFactory(namespace, name string) *MockBackupPolicyFactory { + f := &MockBackupPolicyFactory{} + f.Init(namespace, name, &dpv1alpha1.BackupPolicy{}, f) + return f +} + +func (f *MockBackupPolicyFactory) SetBackupRepoName(backupRepoName string) *MockBackupPolicyFactory { + if backupRepoName == "" { + f.Get().Spec.BackupRepoName = nil + } else { + f.Get().Spec.BackupRepoName = &backupRepoName + } + return f +} + +func (f *MockBackupPolicyFactory) SetPathPrefix(pathPrefix string) *MockBackupPolicyFactory { + f.Get().Spec.PathPrefix = pathPrefix + return f +} + +func (f *MockBackupPolicyFactory) SetBackoffLimit(backoffLimit int32) *MockBackupPolicyFactory { + f.Get().Spec.BackoffLimit = &backoffLimit + return f +} + +func (f *MockBackupPolicyFactory) AddBackupMethod(name string, + snapshotVolumes bool, actionSetName string) *MockBackupPolicyFactory { + f.Get().Spec.BackupMethods = append(f.Get().Spec.BackupMethods, + dpv1alpha1.BackupMethod{ + Name: name, + SnapshotVolumes: &snapshotVolumes, + ActionSetName: actionSetName, + TargetVolumes: &dpv1alpha1.TargetVolumeInfo{}, + }) + return f +} + +func (f *MockBackupPolicyFactory) SetBackupMethodVolumes(names []string) *MockBackupPolicyFactory { + f.Get().Spec.BackupMethods[len(f.Get().Spec.BackupMethods)-1].TargetVolumes.Volumes = names + return f +} + +func (f *MockBackupPolicyFactory) SetBackupMethodVolumeMounts(keyAndValues ...string) *MockBackupPolicyFactory { + var volumeMounts []corev1.VolumeMount + for k, v := range testapps.WithMap(keyAndValues...) 
{ + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: k, + MountPath: v, + }) + } + f.Get().Spec.BackupMethods[len(f.Get().Spec.BackupMethods)-1].TargetVolumes.VolumeMounts = volumeMounts + return f +} + +func (f *MockBackupPolicyFactory) SetTarget(keyAndValues ...string) *MockBackupPolicyFactory { + f.Get().Spec.Target = &dpv1alpha1.BackupTarget{ + PodSelector: &dpv1alpha1.PodSelector{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: testapps.WithMap(keyAndValues...), + }, + }, + } + return f +} + +func (f *MockBackupPolicyFactory) SetTargetConnectionCredential(secretName string) *MockBackupPolicyFactory { + f.Get().Spec.Target.ConnectionCredential = &dpv1alpha1.ConnectionCredential{ + SecretName: secretName, + UsernameKey: "username", + PasswordKey: "password", + } + return f +} diff --git a/internal/testutil/apps/backuprepo_factory.go b/internal/testutil/dataprotection/backuprepo_factory.go similarity index 69% rename from internal/testutil/apps/backuprepo_factory.go rename to internal/testutil/dataprotection/backuprepo_factory.go index 4e4b1e885f4..48a9b74c292 100644 --- a/internal/testutil/apps/backuprepo_factory.go +++ b/internal/testutil/dataprotection/backuprepo_factory.go @@ -17,25 +17,26 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package apps +package dataprotection import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "github.com/apecloud/kubeblocks/internal/constant" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) type MockBackupRepoFactory struct { - BaseFactory[dataprotectionv1alpha1.BackupRepo, *dataprotectionv1alpha1.BackupRepo, MockBackupRepoFactory] + testapps.BaseFactory[dpv1alpha1.BackupRepo, *dpv1alpha1.BackupRepo, MockBackupRepoFactory] } func NewBackupRepoFactory(namespace, name string) *MockBackupRepoFactory { f := &MockBackupRepoFactory{} - f.init(namespace, name, - &dataprotectionv1alpha1.BackupRepo{ - Spec: dataprotectionv1alpha1.BackupRepoSpec{ + f.Init(namespace, name, + &dpv1alpha1.BackupRepo{ + Spec: dpv1alpha1.BackupRepoSpec{ VolumeCapacity: resource.MustParse("100Gi"), PVReclaimPolicy: "Retain", }, @@ -44,39 +45,39 @@ func NewBackupRepoFactory(namespace, name string) *MockBackupRepoFactory { } func (factory *MockBackupRepoFactory) SetStorageProviderRef(providerName string) *MockBackupRepoFactory { - factory.get().Spec.StorageProviderRef = providerName + factory.Get().Spec.StorageProviderRef = providerName return factory } func (factory *MockBackupRepoFactory) SetVolumeCapacity(amount string) *MockBackupRepoFactory { - factory.get().Spec.VolumeCapacity = resource.MustParse(amount) + factory.Get().Spec.VolumeCapacity = resource.MustParse(amount) return factory } func (factory *MockBackupRepoFactory) SetPVReclaimPolicy(policy string) *MockBackupRepoFactory { - factory.get().Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimPolicy(policy) + factory.Get().Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimPolicy(policy) return factory } func (factory *MockBackupRepoFactory) 
SetConfig(config map[string]string) *MockBackupRepoFactory { - factory.get().Spec.Config = config + factory.Get().Spec.Config = config return factory } func (factory *MockBackupRepoFactory) SetCredential(ref *corev1.SecretReference) *MockBackupRepoFactory { - factory.get().Spec.Credential = ref + factory.Get().Spec.Credential = ref return factory } func (factory *MockBackupRepoFactory) SetAsDefaultRepo(v bool) *MockBackupRepoFactory { if v { - obj := factory.get() + obj := factory.Get() if obj.Annotations == nil { obj.Annotations = map[string]string{} } - obj.Annotations[constant.DefaultBackupRepoAnnotationKey] = "true" + obj.Annotations[dptypes.DefaultBackupRepoAnnotationKey] = "true" } else { - delete(factory.get().Annotations, constant.DefaultBackupRepoAnnotationKey) + delete(factory.Get().Annotations, dptypes.DefaultBackupRepoAnnotationKey) } return factory } diff --git a/internal/testutil/dataprotection/backupschedule_factory.go b/internal/testutil/dataprotection/backupschedule_factory.go new file mode 100644 index 00000000000..ad59caf1946 --- /dev/null +++ b/internal/testutil/dataprotection/backupschedule_factory.go @@ -0,0 +1,56 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +type BackupScheduleFactory struct { + testapps.BaseFactory[dpv1alpha1.BackupSchedule, *dpv1alpha1.BackupSchedule, BackupScheduleFactory] +} + +func NewBackupScheduleFactory(namespace, name string) *BackupScheduleFactory { + f := &BackupScheduleFactory{} + f.Init(namespace, name, &dpv1alpha1.BackupSchedule{}, f) + f.Get().Spec.Schedules = []dpv1alpha1.SchedulePolicy{} + return f +} + +func (f *BackupScheduleFactory) SetBackupPolicyName(backupPolicyName string) *BackupScheduleFactory { + f.Get().Spec.BackupPolicyName = backupPolicyName + return f +} + +func (f *BackupScheduleFactory) SetStartingDeadlineMinutes(minutes int64) *BackupScheduleFactory { + f.Get().Spec.StartingDeadlineMinutes = &minutes + return f +} + +func (f *BackupScheduleFactory) AddSchedulePolicy(schedulePolicy dpv1alpha1.SchedulePolicy) *BackupScheduleFactory { + f.Get().Spec.Schedules = append(f.Get().Spec.Schedules, schedulePolicy) + return f +} + +func (f *BackupScheduleFactory) SetSchedules(schedules []dpv1alpha1.SchedulePolicy) *BackupScheduleFactory { + f.Get().Spec.Schedules = schedules + return f +} diff --git a/internal/testutil/dataprotection/constant.go b/internal/testutil/dataprotection/constant.go new file mode 100644 index 00000000000..e873292ef08 --- /dev/null +++ b/internal/testutil/dataprotection/constant.go @@ -0,0 +1,50 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +const ( + ClusterName = "test-cluster" + ComponentName = "test-comp" + ContainerName = "test-container" + + BackupName = "test-backup" + BackupRepoName = "test-repo" + BackupPolicyName = "test-backup-policy" + BackupMethodName = "xtrabackup" + VSBackupMethodName = "volume-snapshot" + BackupPathPrefix = "/backup" + ActionSetName = "xtrabackup" + VSActionSetName = "volume-snapshot" + + DataVolumeName = "data" + DataVolumeMountPath = "/data" + LogVolumeName = "log" + LogVolumeMountPath = "/log" + + StorageProviderName = "test-sp" + StorageClassName = "test-sc" + + BackupScheduleName = "test-backup-schedule" + BackupScheduleCron = "0 3 * * *" + BackupRetention = "7d" + StartingDeadlineMinutes = 10 + + KBToolImage = "apecloud/kubeblocks-tool:latest" +) diff --git a/internal/testutil/dataprotection/k8s_utils.go b/internal/testutil/dataprotection/k8s_utils.go new file mode 100644 index 00000000000..c332d894929 --- /dev/null +++ b/internal/testutil/dataprotection/k8s_utils.go @@ -0,0 +1,33 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + corev1 "k8s.io/api/core/v1" + + "github.com/apecloud/kubeblocks/internal/testutil" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +func NewFakePVC(testCtx *testutil.TestContext, name string) *corev1.PersistentVolumeClaim { + return testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, name, "", "", ""). + SetStorage("1Gi"). + Create(testCtx).GetObject() +} diff --git a/internal/testutil/dataprotection/restore_factory.go b/internal/testutil/dataprotection/restore_factory.go new file mode 100644 index 00000000000..8e8f3048f9f --- /dev/null +++ b/internal/testutil/dataprotection/restore_factory.go @@ -0,0 +1,86 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package dataprotection + +import ( + corev1 "k8s.io/api/core/v1" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +type MockRestoreFactory struct { + testapps.BaseFactory[dpv1alpha1.Restore, *dpv1alpha1.Restore, MockRestoreFactory] +} + +func NewRestoreJobFactory(namespace, name string) *MockRestoreFactory { + f := &MockRestoreFactory{} + // f.init(namespace, name, + // &dpv1alpha1.RestoreJob{ + // Spec: dpv1alpha1.RestoreJobSpec{ + // Target: dpv1alpha1.TargetCluster{ + // LabelsSelector: &metav1.LabelSelector{ + // MatchLabels: map[string]string{}, + // }, + // }, + // }, + // }, f) + return f +} + +func (factory *MockRestoreFactory) SetBackupName(backupName string) *MockRestoreFactory { + // factory.get().Spec.Backup.Name = backupName + return factory +} + +func (factory *MockRestoreFactory) AddTargetMatchLabels(keyAndValues ...string) *MockRestoreFactory { + // for k, v := range WithMap(keyAndValues...) 
{ + // factory.get().Spec.Target.LabelsSelector.MatchLabels[k] = v + // } + return factory +} + +func (factory *MockRestoreFactory) SetTargetSecretName(name string) *MockRestoreFactory { + // factory.get().Spec.Target.Secret = &dataprotectionv1alpha1.BackupPolicySecret{Name: name} + return factory +} + +func (factory *MockRestoreFactory) AddTargetVolume(volume corev1.Volume) *MockRestoreFactory { + // factory.get().Spec.TargetVolumes = append(factory.get().Spec.TargetVolumes, volume) + return factory +} + +func (factory *MockRestoreFactory) AddTargetVolumePVC(volumeName, pvcName string) *MockRestoreFactory { + volume := corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + } + factory.AddTargetVolume(volume) + return factory +} + +func (factory *MockRestoreFactory) AddTargetVolumeMount(volumeMount corev1.VolumeMount) *MockRestoreFactory { + // factory.get().Spec.TargetVolumeMounts = append(factory.get().Spec.TargetVolumeMounts, volumeMount) + return factory +} diff --git a/internal/testutil/dataprotection/types.go b/internal/testutil/dataprotection/types.go new file mode 100644 index 00000000000..3c249ea1c85 --- /dev/null +++ b/internal/testutil/dataprotection/types.go @@ -0,0 +1,32 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + corev1 "k8s.io/api/core/v1" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" +) + +type BackupClusterInfo struct { + Cluster *appsv1alpha1.Cluster + TargetPod *corev1.Pod + TargetPVC string +} diff --git a/internal/testutil/dataprotection/utils.go b/internal/testutil/dataprotection/utils.go new file mode 100644 index 00000000000..368bd34a9fe --- /dev/null +++ b/internal/testutil/dataprotection/utils.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + . 
"github.com/onsi/gomega" + + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "github.com/apecloud/kubeblocks/internal/testutil" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +func PatchK8sJobStatus(testCtx *testutil.TestContext, key client.ObjectKey, jobStatus batchv1.JobConditionType) { + Eventually(testapps.GetAndChangeObjStatus(testCtx, key, func(fetched *batchv1.Job) { + jobCondition := batchv1.JobCondition{Type: jobStatus, Status: corev1.ConditionTrue} + fetched.Status.Conditions = append(fetched.Status.Conditions, jobCondition) + })).Should(Succeed()) +} + +func ReplaceK8sJobStatus(testCtx *testutil.TestContext, key client.ObjectKey, jobStatus batchv1.JobConditionType) { + Eventually(testapps.GetAndChangeObjStatus(testCtx, key, func(fetched *batchv1.Job) { + jobCondition := batchv1.JobCondition{Type: jobStatus, Status: corev1.ConditionTrue} + fetched.Status.Conditions = []batchv1.JobCondition{jobCondition} + })).Should(Succeed()) +} + +func PatchVolumeSnapshotStatus(testCtx *testutil.TestContext, key client.ObjectKey, readyToUse bool) { + Eventually(testapps.GetAndChangeObjStatus(testCtx, key, func(fetched *vsv1.VolumeSnapshot) { + snapStatus := vsv1.VolumeSnapshotStatus{ReadyToUse: &readyToUse} + fetched.Status = &snapStatus + })).Should(Succeed()) +} + +func PatchBackupStatus(testCtx *testutil.TestContext, key client.ObjectKey, status dpv1alpha1.BackupStatus) { + Eventually(testapps.GetAndChangeObjStatus(testCtx, key, func(fetched *dpv1alpha1.Backup) { + fetched.Status = status + })).Should(Succeed()) +} diff --git a/internal/testutil/dataprotection/vs_factory.go b/internal/testutil/dataprotection/vs_factory.go new file mode 100644 index 00000000000..4f5aa3c071e --- /dev/null +++ 
b/internal/testutil/dataprotection/vs_factory.go @@ -0,0 +1,44 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" +) + +type MockVolumeSnapshotFactory struct { + testapps.BaseFactory[vsv1.VolumeSnapshot, *vsv1.VolumeSnapshot, MockVolumeSnapshotFactory] +} + +func NewVolumeSnapshotFactory(namespace, name string) *MockVolumeSnapshotFactory { + f := &MockVolumeSnapshotFactory{} + f.Init(namespace, name, + &vsv1.VolumeSnapshot{ + Spec: vsv1.VolumeSnapshotSpec{}, + }, f) + return f +} + +func (f *MockVolumeSnapshotFactory) SetSourcePVCName(name string) *MockVolumeSnapshotFactory { + f.Get().Spec.Source.PersistentVolumeClaimName = &name + return f +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/actionset.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/actionset.go new file mode 100644 index 00000000000..ef49d4eb84b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/actionset.go @@ -0,0 +1,184 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + scheme "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ActionSetsGetter has a method to return a ActionSetInterface. +// A group's client should implement this interface. +type ActionSetsGetter interface { + ActionSets() ActionSetInterface +} + +// ActionSetInterface has methods to work with ActionSet resources. 
+type ActionSetInterface interface { + Create(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.CreateOptions) (*v1alpha1.ActionSet, error) + Update(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (*v1alpha1.ActionSet, error) + UpdateStatus(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (*v1alpha1.ActionSet, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ActionSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ActionSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ActionSet, err error) + ActionSetExpansion +} + +// actionSets implements ActionSetInterface +type actionSets struct { + client rest.Interface +} + +// newActionSets returns a ActionSets +func newActionSets(c *DataprotectionV1alpha1Client) *actionSets { + return &actionSets{ + client: c.RESTClient(), + } +} + +// Get takes name of the actionSet, and returns the corresponding actionSet object, and an error if there is any. +func (c *actionSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ActionSet, err error) { + result = &v1alpha1.ActionSet{} + err = c.client.Get(). + Resource("actionsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ActionSets that match those selectors. 
+func (c *actionSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ActionSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ActionSetList{} + err = c.client.Get(). + Resource("actionsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested actionSets. +func (c *actionSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("actionsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a actionSet and creates it. Returns the server's representation of the actionSet, and an error, if there is any. +func (c *actionSets) Create(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.CreateOptions) (result *v1alpha1.ActionSet, err error) { + result = &v1alpha1.ActionSet{} + err = c.client.Post(). + Resource("actionsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(actionSet). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a actionSet and updates it. Returns the server's representation of the actionSet, and an error, if there is any. +func (c *actionSets) Update(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (result *v1alpha1.ActionSet, err error) { + result = &v1alpha1.ActionSet{} + err = c.client.Put(). + Resource("actionsets"). + Name(actionSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(actionSet). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *actionSets) UpdateStatus(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (result *v1alpha1.ActionSet, err error) { + result = &v1alpha1.ActionSet{} + err = c.client.Put(). + Resource("actionsets"). + Name(actionSet.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(actionSet). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the actionSet and deletes it. Returns an error if one occurs. +func (c *actionSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("actionsets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *actionSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("actionsets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched actionSet. +func (c *actionSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ActionSet, err error) { + result = &v1alpha1.ActionSet{} + err = c.client.Patch(pt). + Resource("actionsets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backupschedule.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backupschedule.go new file mode 100644 index 00000000000..5094b4a3a9b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backupschedule.go @@ -0,0 +1,195 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + scheme "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupSchedulesGetter has a method to return a BackupScheduleInterface. +// A group's client should implement this interface. +type BackupSchedulesGetter interface { + BackupSchedules(namespace string) BackupScheduleInterface +} + +// BackupScheduleInterface has methods to work with BackupSchedule resources. 
+type BackupScheduleInterface interface { + Create(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.CreateOptions) (*v1alpha1.BackupSchedule, error) + Update(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (*v1alpha1.BackupSchedule, error) + UpdateStatus(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (*v1alpha1.BackupSchedule, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupSchedule, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupScheduleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupSchedule, err error) + BackupScheduleExpansion +} + +// backupSchedules implements BackupScheduleInterface +type backupSchedules struct { + client rest.Interface + ns string +} + +// newBackupSchedules returns a BackupSchedules +func newBackupSchedules(c *DataprotectionV1alpha1Client, namespace string) *backupSchedules { + return &backupSchedules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupSchedule, and returns the corresponding backupSchedule object, and an error if there is any. +func (c *backupSchedules) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupSchedule, err error) { + result = &v1alpha1.BackupSchedule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupschedules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupSchedules that match those selectors. 
+func (c *backupSchedules) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupScheduleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.BackupScheduleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupschedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupSchedules. +func (c *backupSchedules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backupschedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backupSchedule and creates it. Returns the server's representation of the backupSchedule, and an error, if there is any. +func (c *backupSchedules) Create(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.CreateOptions) (result *v1alpha1.BackupSchedule, err error) { + result = &v1alpha1.BackupSchedule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backupschedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupSchedule). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupSchedule and updates it. Returns the server's representation of the backupSchedule, and an error, if there is any. +func (c *backupSchedules) Update(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (result *v1alpha1.BackupSchedule, err error) { + result = &v1alpha1.BackupSchedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupschedules"). + Name(backupSchedule.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(backupSchedule). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupSchedules) UpdateStatus(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (result *v1alpha1.BackupSchedule, err error) { + result = &v1alpha1.BackupSchedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupschedules"). + Name(backupSchedule.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupSchedule). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupSchedule and deletes it. Returns an error if one occurs. +func (c *backupSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupschedules"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupSchedules) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backupschedules"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupSchedule. +func (c *backupSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupSchedule, err error) { + result = &v1alpha1.BackupSchedule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backupschedules"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backuptool.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backuptool.go deleted file mode 100644 index 2362bf20450..00000000000 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/backuptool.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - scheme "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// BackupToolsGetter has a method to return a BackupToolInterface. -// A group's client should implement this interface. -type BackupToolsGetter interface { - BackupTools() BackupToolInterface -} - -// BackupToolInterface has methods to work with BackupTool resources. 
-type BackupToolInterface interface { - Create(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.CreateOptions) (*v1alpha1.BackupTool, error) - Update(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (*v1alpha1.BackupTool, error) - UpdateStatus(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (*v1alpha1.BackupTool, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupTool, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupToolList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupTool, err error) - BackupToolExpansion -} - -// backupTools implements BackupToolInterface -type backupTools struct { - client rest.Interface -} - -// newBackupTools returns a BackupTools -func newBackupTools(c *DataprotectionV1alpha1Client) *backupTools { - return &backupTools{ - client: c.RESTClient(), - } -} - -// Get takes name of the backupTool, and returns the corresponding backupTool object, and an error if there is any. -func (c *backupTools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupTool, err error) { - result = &v1alpha1.BackupTool{} - err = c.client.Get(). - Resource("backuptools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of BackupTools that match those selectors. 
-func (c *backupTools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupToolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.BackupToolList{} - err = c.client.Get(). - Resource("backuptools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested backupTools. -func (c *backupTools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("backuptools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a backupTool and creates it. Returns the server's representation of the backupTool, and an error, if there is any. -func (c *backupTools) Create(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.CreateOptions) (result *v1alpha1.BackupTool, err error) { - result = &v1alpha1.BackupTool{} - err = c.client.Post(). - Resource("backuptools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(backupTool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a backupTool and updates it. Returns the server's representation of the backupTool, and an error, if there is any. -func (c *backupTools) Update(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (result *v1alpha1.BackupTool, err error) { - result = &v1alpha1.BackupTool{} - err = c.client.Put(). - Resource("backuptools"). - Name(backupTool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(backupTool). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *backupTools) UpdateStatus(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (result *v1alpha1.BackupTool, err error) { - result = &v1alpha1.BackupTool{} - err = c.client.Put(). - Resource("backuptools"). - Name(backupTool.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(backupTool). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the backupTool and deletes it. Returns an error if one occurs. -func (c *backupTools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("backuptools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *backupTools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("backuptools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched backupTool. -func (c *backupTools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupTool, err error) { - result = &v1alpha1.BackupTool{} - err = c.client.Patch(pt). - Resource("backuptools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/dataprotection_client.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/dataprotection_client.go index 37e95aeff31..54579c00671 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/dataprotection_client.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/dataprotection_client.go @@ -28,11 +28,12 @@ import ( type DataprotectionV1alpha1Interface interface { RESTClient() rest.Interface + ActionSetsGetter BackupsGetter BackupPoliciesGetter BackupReposGetter - BackupToolsGetter - RestoreJobsGetter + BackupSchedulesGetter + RestoresGetter } // DataprotectionV1alpha1Client is used to interact with features provided by the dataprotection.kubeblocks.io group. @@ -40,6 +41,10 @@ type DataprotectionV1alpha1Client struct { restClient rest.Interface } +func (c *DataprotectionV1alpha1Client) ActionSets() ActionSetInterface { + return newActionSets(c) +} + func (c *DataprotectionV1alpha1Client) Backups(namespace string) BackupInterface { return newBackups(c, namespace) } @@ -52,12 +57,12 @@ func (c *DataprotectionV1alpha1Client) BackupRepos() BackupRepoInterface { return newBackupRepos(c) } -func (c *DataprotectionV1alpha1Client) BackupTools() BackupToolInterface { - return newBackupTools(c) +func (c *DataprotectionV1alpha1Client) BackupSchedules(namespace string) BackupScheduleInterface { + return newBackupSchedules(c, namespace) } -func (c *DataprotectionV1alpha1Client) RestoreJobs(namespace string) RestoreJobInterface { - return newRestoreJobs(c, namespace) +func (c *DataprotectionV1alpha1Client) Restores(namespace string) RestoreInterface { + return newRestores(c, namespace) } // NewForConfig creates a new DataprotectionV1alpha1Client for the given config. 
diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_actionset.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_actionset.go new file mode 100644 index 00000000000..2ffd828b4ff --- /dev/null +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_actionset.go @@ -0,0 +1,132 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeActionSets implements ActionSetInterface +type FakeActionSets struct { + Fake *FakeDataprotectionV1alpha1 +} + +var actionsetsResource = v1alpha1.SchemeGroupVersion.WithResource("actionsets") + +var actionsetsKind = v1alpha1.SchemeGroupVersion.WithKind("ActionSet") + +// Get takes name of the actionSet, and returns the corresponding actionSet object, and an error if there is any. +func (c *FakeActionSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ActionSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(actionsetsResource, name), &v1alpha1.ActionSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ActionSet), err +} + +// List takes label and field selectors, and returns the list of ActionSets that match those selectors. +func (c *FakeActionSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ActionSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(actionsetsResource, actionsetsKind, opts), &v1alpha1.ActionSetList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ActionSetList{ListMeta: obj.(*v1alpha1.ActionSetList).ListMeta} + for _, item := range obj.(*v1alpha1.ActionSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested actionSets. +func (c *FakeActionSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(actionsetsResource, opts)) +} + +// Create takes the representation of a actionSet and creates it. Returns the server's representation of the actionSet, and an error, if there is any. +func (c *FakeActionSets) Create(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.CreateOptions) (result *v1alpha1.ActionSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(actionsetsResource, actionSet), &v1alpha1.ActionSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ActionSet), err +} + +// Update takes the representation of a actionSet and updates it. Returns the server's representation of the actionSet, and an error, if there is any. 
+func (c *FakeActionSets) Update(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (result *v1alpha1.ActionSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(actionsetsResource, actionSet), &v1alpha1.ActionSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ActionSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeActionSets) UpdateStatus(ctx context.Context, actionSet *v1alpha1.ActionSet, opts v1.UpdateOptions) (*v1alpha1.ActionSet, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(actionsetsResource, "status", actionSet), &v1alpha1.ActionSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ActionSet), err +} + +// Delete takes name of the actionSet and deletes it. Returns an error if one occurs. +func (c *FakeActionSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(actionsetsResource, name, opts), &v1alpha1.ActionSet{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeActionSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(actionsetsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ActionSetList{}) + return err +} + +// Patch applies the patch and returns the patched actionSet. +func (c *FakeActionSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ActionSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(actionsetsResource, name, pt, data, subresources...), &v1alpha1.ActionSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ActionSet), err +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backupschedule.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backupschedule.go new file mode 100644 index 00000000000..42f6bfd51c7 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backupschedule.go @@ -0,0 +1,141 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBackupSchedules implements BackupScheduleInterface +type FakeBackupSchedules struct { + Fake *FakeDataprotectionV1alpha1 + ns string +} + +var backupschedulesResource = v1alpha1.SchemeGroupVersion.WithResource("backupschedules") + +var backupschedulesKind = v1alpha1.SchemeGroupVersion.WithKind("BackupSchedule") + +// Get takes name of the backupSchedule, and returns the corresponding backupSchedule object, and an error if there is any. 
+func (c *FakeBackupSchedules) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupSchedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(backupschedulesResource, c.ns, name), &v1alpha1.BackupSchedule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.BackupSchedule), err +} + +// List takes label and field selectors, and returns the list of BackupSchedules that match those selectors. +func (c *FakeBackupSchedules) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupScheduleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(backupschedulesResource, backupschedulesKind, c.ns, opts), &v1alpha1.BackupScheduleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.BackupScheduleList{ListMeta: obj.(*v1alpha1.BackupScheduleList).ListMeta} + for _, item := range obj.(*v1alpha1.BackupScheduleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested backupSchedules. +func (c *FakeBackupSchedules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(backupschedulesResource, c.ns, opts)) + +} + +// Create takes the representation of a backupSchedule and creates it. Returns the server's representation of the backupSchedule, and an error, if there is any. +func (c *FakeBackupSchedules) Create(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.CreateOptions) (result *v1alpha1.BackupSchedule, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(backupschedulesResource, c.ns, backupSchedule), &v1alpha1.BackupSchedule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.BackupSchedule), err +} + +// Update takes the representation of a backupSchedule and updates it. Returns the server's representation of the backupSchedule, and an error, if there is any. +func (c *FakeBackupSchedules) Update(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (result *v1alpha1.BackupSchedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(backupschedulesResource, c.ns, backupSchedule), &v1alpha1.BackupSchedule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.BackupSchedule), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBackupSchedules) UpdateStatus(ctx context.Context, backupSchedule *v1alpha1.BackupSchedule, opts v1.UpdateOptions) (*v1alpha1.BackupSchedule, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(backupschedulesResource, "status", c.ns, backupSchedule), &v1alpha1.BackupSchedule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.BackupSchedule), err +} + +// Delete takes name of the backupSchedule and deletes it. Returns an error if one occurs. +func (c *FakeBackupSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(backupschedulesResource, c.ns, name, opts), &v1alpha1.BackupSchedule{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeBackupSchedules) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(backupschedulesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.BackupScheduleList{}) + return err +} + +// Patch applies the patch and returns the patched backupSchedule. +func (c *FakeBackupSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupSchedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(backupschedulesResource, c.ns, name, pt, data, subresources...), &v1alpha1.BackupSchedule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.BackupSchedule), err +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go deleted file mode 100644 index 8ef14eefcf3..00000000000 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_backuptool.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeBackupTools implements BackupToolInterface -type FakeBackupTools struct { - Fake *FakeDataprotectionV1alpha1 -} - -var backuptoolsResource = v1alpha1.SchemeGroupVersion.WithResource("backuptools") - -var backuptoolsKind = v1alpha1.SchemeGroupVersion.WithKind("BackupTool") - -// Get takes name of the backupTool, and returns the corresponding backupTool object, and an error if there is any. -func (c *FakeBackupTools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupTool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(backuptoolsResource, name), &v1alpha1.BackupTool{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.BackupTool), err -} - -// List takes label and field selectors, and returns the list of BackupTools that match those selectors. -func (c *FakeBackupTools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupToolList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(backuptoolsResource, backuptoolsKind, opts), &v1alpha1.BackupToolList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.BackupToolList{ListMeta: obj.(*v1alpha1.BackupToolList).ListMeta} - for _, item := range obj.(*v1alpha1.BackupToolList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested backupTools. 
-func (c *FakeBackupTools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(backuptoolsResource, opts)) -} - -// Create takes the representation of a backupTool and creates it. Returns the server's representation of the backupTool, and an error, if there is any. -func (c *FakeBackupTools) Create(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.CreateOptions) (result *v1alpha1.BackupTool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(backuptoolsResource, backupTool), &v1alpha1.BackupTool{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.BackupTool), err -} - -// Update takes the representation of a backupTool and updates it. Returns the server's representation of the backupTool, and an error, if there is any. -func (c *FakeBackupTools) Update(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (result *v1alpha1.BackupTool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(backuptoolsResource, backupTool), &v1alpha1.BackupTool{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.BackupTool), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeBackupTools) UpdateStatus(ctx context.Context, backupTool *v1alpha1.BackupTool, opts v1.UpdateOptions) (*v1alpha1.BackupTool, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(backuptoolsResource, "status", backupTool), &v1alpha1.BackupTool{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.BackupTool), err -} - -// Delete takes name of the backupTool and deletes it. Returns an error if one occurs. -func (c *FakeBackupTools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(backuptoolsResource, name, opts), &v1alpha1.BackupTool{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeBackupTools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(backuptoolsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.BackupToolList{}) - return err -} - -// Patch applies the patch and returns the patched backupTool. -func (c *FakeBackupTools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupTool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(backuptoolsResource, name, pt, data, subresources...), &v1alpha1.BackupTool{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.BackupTool), err -} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_dataprotection_client.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_dataprotection_client.go index f67c522d124..c13966489f8 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_dataprotection_client.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_dataprotection_client.go @@ -28,6 +28,10 @@ type FakeDataprotectionV1alpha1 struct { *testing.Fake } +func (c *FakeDataprotectionV1alpha1) ActionSets() v1alpha1.ActionSetInterface { + return &FakeActionSets{c} +} + func (c *FakeDataprotectionV1alpha1) Backups(namespace string) v1alpha1.BackupInterface { return &FakeBackups{c, namespace} } @@ -40,12 +44,12 @@ func (c *FakeDataprotectionV1alpha1) BackupRepos() v1alpha1.BackupRepoInterface return &FakeBackupRepos{c} } -func (c *FakeDataprotectionV1alpha1) BackupTools() v1alpha1.BackupToolInterface { - return &FakeBackupTools{c} +func (c *FakeDataprotectionV1alpha1) 
BackupSchedules(namespace string) v1alpha1.BackupScheduleInterface { + return &FakeBackupSchedules{c, namespace} } -func (c *FakeDataprotectionV1alpha1) RestoreJobs(namespace string) v1alpha1.RestoreJobInterface { - return &FakeRestoreJobs{c, namespace} +func (c *FakeDataprotectionV1alpha1) Restores(namespace string) v1alpha1.RestoreInterface { + return &FakeRestores{c, namespace} } // RESTClient returns a RESTClient that is used to communicate diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restore.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restore.go new file mode 100644 index 00000000000..9925bdea1ba --- /dev/null +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restore.go @@ -0,0 +1,141 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRestores implements RestoreInterface +type FakeRestores struct { + Fake *FakeDataprotectionV1alpha1 + ns string +} + +var restoresResource = v1alpha1.SchemeGroupVersion.WithResource("restores") + +var restoresKind = v1alpha1.SchemeGroupVersion.WithKind("Restore") + +// Get takes name of the restore, and returns the corresponding restore object, and an error if there is any. +func (c *FakeRestores) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(restoresResource, c.ns, name), &v1alpha1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Restore), err +} + +// List takes label and field selectors, and returns the list of Restores that match those selectors. +func (c *FakeRestores) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RestoreList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(restoresResource, restoresKind, c.ns, opts), &v1alpha1.RestoreList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RestoreList{ListMeta: obj.(*v1alpha1.RestoreList).ListMeta} + for _, item := range obj.(*v1alpha1.RestoreList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested restores. +func (c *FakeRestores) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(restoresResource, c.ns, opts)) + +} + +// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *FakeRestores) Create(ctx context.Context, restore *v1alpha1.Restore, opts v1.CreateOptions) (result *v1alpha1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(restoresResource, c.ns, restore), &v1alpha1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Restore), err +} + +// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *FakeRestores) Update(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (result *v1alpha1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(restoresResource, c.ns, restore), &v1alpha1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Restore), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRestores) UpdateStatus(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (*v1alpha1.Restore, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(restoresResource, "status", c.ns, restore), &v1alpha1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Restore), err +} + +// Delete takes name of the restore and deletes it. Returns an error if one occurs. +func (c *FakeRestores) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(restoresResource, c.ns, name, opts), &v1alpha1.Restore{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRestores) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(restoresResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.RestoreList{}) + return err +} + +// Patch applies the patch and returns the patched restore. +func (c *FakeRestores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(restoresResource, c.ns, name, pt, data, subresources...), &v1alpha1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Restore), err +} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go deleted file mode 100644 index 64e6993b258..00000000000 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/fake/fake_restorejob.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeRestoreJobs implements RestoreJobInterface -type FakeRestoreJobs struct { - Fake *FakeDataprotectionV1alpha1 - ns string -} - -var restorejobsResource = v1alpha1.SchemeGroupVersion.WithResource("restorejobs") - -var restorejobsKind = v1alpha1.SchemeGroupVersion.WithKind("RestoreJob") - -// Get takes name of the restoreJob, and returns the corresponding restoreJob object, and an error if there is any. -func (c *FakeRestoreJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RestoreJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(restorejobsResource, c.ns, name), &v1alpha1.RestoreJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RestoreJob), err -} - -// List takes label and field selectors, and returns the list of RestoreJobs that match those selectors. -func (c *FakeRestoreJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RestoreJobList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(restorejobsResource, restorejobsKind, c.ns, opts), &v1alpha1.RestoreJobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RestoreJobList{ListMeta: obj.(*v1alpha1.RestoreJobList).ListMeta} - for _, item := range obj.(*v1alpha1.RestoreJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested restoreJobs. 
-func (c *FakeRestoreJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(restorejobsResource, c.ns, opts)) - -} - -// Create takes the representation of a restoreJob and creates it. Returns the server's representation of the restoreJob, and an error, if there is any. -func (c *FakeRestoreJobs) Create(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.CreateOptions) (result *v1alpha1.RestoreJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(restorejobsResource, c.ns, restoreJob), &v1alpha1.RestoreJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RestoreJob), err -} - -// Update takes the representation of a restoreJob and updates it. Returns the server's representation of the restoreJob, and an error, if there is any. -func (c *FakeRestoreJobs) Update(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (result *v1alpha1.RestoreJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(restorejobsResource, c.ns, restoreJob), &v1alpha1.RestoreJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RestoreJob), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeRestoreJobs) UpdateStatus(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (*v1alpha1.RestoreJob, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(restorejobsResource, "status", c.ns, restoreJob), &v1alpha1.RestoreJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RestoreJob), err -} - -// Delete takes name of the restoreJob and deletes it. Returns an error if one occurs. -func (c *FakeRestoreJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(restorejobsResource, c.ns, name, opts), &v1alpha1.RestoreJob{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRestoreJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(restorejobsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RestoreJobList{}) - return err -} - -// Patch applies the patch and returns the patched restoreJob. -func (c *FakeRestoreJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RestoreJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(restorejobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RestoreJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RestoreJob), err -} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/generated_expansion.go index 6e5cd155273..594323b5e78 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/generated_expansion.go @@ -18,12 +18,14 @@ limitations under the License. 
package v1alpha1 +type ActionSetExpansion interface{} + type BackupExpansion interface{} type BackupPolicyExpansion interface{} type BackupRepoExpansion interface{} -type BackupToolExpansion interface{} +type BackupScheduleExpansion interface{} -type RestoreJobExpansion interface{} +type RestoreExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restorejob.go b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restore.go similarity index 52% rename from pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restorejob.go rename to pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restore.go index 79455ce5f62..923e5a94115 100644 --- a/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restorejob.go +++ b/pkg/client/clientset/versioned/typed/dataprotection/v1alpha1/restore.go @@ -30,46 +30,46 @@ import ( rest "k8s.io/client-go/rest" ) -// RestoreJobsGetter has a method to return a RestoreJobInterface. +// RestoresGetter has a method to return a RestoreInterface. // A group's client should implement this interface. -type RestoreJobsGetter interface { - RestoreJobs(namespace string) RestoreJobInterface +type RestoresGetter interface { + Restores(namespace string) RestoreInterface } -// RestoreJobInterface has methods to work with RestoreJob resources. -type RestoreJobInterface interface { - Create(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.CreateOptions) (*v1alpha1.RestoreJob, error) - Update(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (*v1alpha1.RestoreJob, error) - UpdateStatus(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (*v1alpha1.RestoreJob, error) +// RestoreInterface has methods to work with Restore resources. 
+type RestoreInterface interface { + Create(ctx context.Context, restore *v1alpha1.Restore, opts v1.CreateOptions) (*v1alpha1.Restore, error) + Update(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (*v1alpha1.Restore, error) + UpdateStatus(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (*v1alpha1.Restore, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RestoreJob, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RestoreJobList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Restore, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RestoreList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RestoreJob, err error) - RestoreJobExpansion + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Restore, err error) + RestoreExpansion } -// restoreJobs implements RestoreJobInterface -type restoreJobs struct { +// restores implements RestoreInterface +type restores struct { client rest.Interface ns string } -// newRestoreJobs returns a RestoreJobs -func newRestoreJobs(c *DataprotectionV1alpha1Client, namespace string) *restoreJobs { - return &restoreJobs{ +// newRestores returns a Restores +func newRestores(c *DataprotectionV1alpha1Client, namespace string) *restores { + return &restores{ client: c.RESTClient(), ns: namespace, } } -// Get takes name of the restoreJob, and returns the corresponding restoreJob object, and an error if there is any. 
-func (c *restoreJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RestoreJob, err error) { - result = &v1alpha1.RestoreJob{} +// Get takes name of the restore, and returns the corresponding restore object, and an error if there is any. +func (c *restores) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Restore, err error) { + result = &v1alpha1.Restore{} err = c.client.Get(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). Name(name). VersionedParams(&options, scheme.ParameterCodec). Do(ctx). @@ -77,16 +77,16 @@ func (c *restoreJobs) Get(ctx context.Context, name string, options v1.GetOption return } -// List takes label and field selectors, and returns the list of RestoreJobs that match those selectors. -func (c *restoreJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RestoreJobList, err error) { +// List takes label and field selectors, and returns the list of Restores that match those selectors. +func (c *restores) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RestoreList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.RestoreJobList{} + result = &v1alpha1.RestoreList{} err = c.client.Get(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Do(ctx). @@ -94,8 +94,8 @@ func (c *restoreJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1 return } -// Watch returns a watch.Interface that watches the requested restoreJobs. -func (c *restoreJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +// Watch returns a watch.Interface that watches the requested restores. 
+func (c *restores) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -103,34 +103,34 @@ func (c *restoreJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int opts.Watch = true return c.client.Get(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Watch(ctx) } -// Create takes the representation of a restoreJob and creates it. Returns the server's representation of the restoreJob, and an error, if there is any. -func (c *restoreJobs) Create(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.CreateOptions) (result *v1alpha1.RestoreJob, err error) { - result = &v1alpha1.RestoreJob{} +// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *restores) Create(ctx context.Context, restore *v1alpha1.Restore, opts v1.CreateOptions) (result *v1alpha1.Restore, err error) { + result = &v1alpha1.Restore{} err = c.client.Post(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). VersionedParams(&opts, scheme.ParameterCodec). - Body(restoreJob). + Body(restore). Do(ctx). Into(result) return } -// Update takes the representation of a restoreJob and updates it. Returns the server's representation of the restoreJob, and an error, if there is any. -func (c *restoreJobs) Update(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (result *v1alpha1.RestoreJob, err error) { - result = &v1alpha1.RestoreJob{} +// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any. 
+func (c *restores) Update(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (result *v1alpha1.Restore, err error) { + result = &v1alpha1.Restore{} err = c.client.Put(). Namespace(c.ns). - Resource("restorejobs"). - Name(restoreJob.Name). + Resource("restores"). + Name(restore.Name). VersionedParams(&opts, scheme.ParameterCodec). - Body(restoreJob). + Body(restore). Do(ctx). Into(result) return @@ -138,25 +138,25 @@ func (c *restoreJobs) Update(ctx context.Context, restoreJob *v1alpha1.RestoreJo // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *restoreJobs) UpdateStatus(ctx context.Context, restoreJob *v1alpha1.RestoreJob, opts v1.UpdateOptions) (result *v1alpha1.RestoreJob, err error) { - result = &v1alpha1.RestoreJob{} +func (c *restores) UpdateStatus(ctx context.Context, restore *v1alpha1.Restore, opts v1.UpdateOptions) (result *v1alpha1.Restore, err error) { + result = &v1alpha1.Restore{} err = c.client.Put(). Namespace(c.ns). - Resource("restorejobs"). - Name(restoreJob.Name). + Resource("restores"). + Name(restore.Name). SubResource("status"). VersionedParams(&opts, scheme.ParameterCodec). - Body(restoreJob). + Body(restore). Do(ctx). Into(result) return } -// Delete takes name of the restoreJob and deletes it. Returns an error if one occurs. -func (c *restoreJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +// Delete takes name of the restore and deletes it. Returns an error if one occurs. +func (c *restores) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). Name(name). Body(&opts). Do(ctx). @@ -164,14 +164,14 @@ func (c *restoreJobs) Delete(ctx context.Context, name string, opts v1.DeleteOpt } // DeleteCollection deletes a collection of objects. 
-func (c *restoreJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *restores) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second } return c.client.Delete(). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). VersionedParams(&listOpts, scheme.ParameterCodec). Timeout(timeout). Body(&opts). @@ -179,12 +179,12 @@ func (c *restoreJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOption Error() } -// Patch applies the patch and returns the patched restoreJob. -func (c *restoreJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RestoreJob, err error) { - result = &v1alpha1.RestoreJob{} +// Patch applies the patch and returns the patched restore. +func (c *restores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Restore, err error) { + result = &v1alpha1.Restore{} err = c.client.Patch(pt). Namespace(c.ns). - Resource("restorejobs"). + Resource("restores"). Name(name). SubResource(subresources...). VersionedParams(&opts, scheme.ParameterCodec). 
diff --git a/pkg/client/informers/externalversions/dataprotection/v1alpha1/backuptool.go b/pkg/client/informers/externalversions/dataprotection/v1alpha1/actionset.go similarity index 58% rename from pkg/client/informers/externalversions/dataprotection/v1alpha1/backuptool.go rename to pkg/client/informers/externalversions/dataprotection/v1alpha1/actionset.go index 4a29ff221f7..edab407a699 100644 --- a/pkg/client/informers/externalversions/dataprotection/v1alpha1/backuptool.go +++ b/pkg/client/informers/externalversions/dataprotection/v1alpha1/actionset.go @@ -32,58 +32,58 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// BackupToolInformer provides access to a shared informer and lister for -// BackupTools. -type BackupToolInformer interface { +// ActionSetInformer provides access to a shared informer and lister for +// ActionSets. +type ActionSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.BackupToolLister + Lister() v1alpha1.ActionSetLister } -type backupToolInformer struct { +type actionSetInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewBackupToolInformer constructs a new informer for BackupTool type. +// NewActionSetInformer constructs a new informer for ActionSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewBackupToolInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredBackupToolInformer(client, resyncPeriod, indexers, nil) +func NewActionSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredActionSetInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredBackupToolInformer constructs a new informer for BackupTool type. +// NewFilteredActionSetInformer constructs a new informer for ActionSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredBackupToolInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredActionSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DataprotectionV1alpha1().BackupTools().List(context.TODO(), options) + return client.DataprotectionV1alpha1().ActionSets().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DataprotectionV1alpha1().BackupTools().Watch(context.TODO(), options) + return client.DataprotectionV1alpha1().ActionSets().Watch(context.TODO(), options) }, }, - &dataprotectionv1alpha1.BackupTool{}, + &dataprotectionv1alpha1.ActionSet{}, resyncPeriod, indexers, ) } -func (f *backupToolInformer) defaultInformer(client 
versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredBackupToolInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *actionSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredActionSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *backupToolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&dataprotectionv1alpha1.BackupTool{}, f.defaultInformer) +func (f *actionSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&dataprotectionv1alpha1.ActionSet{}, f.defaultInformer) } -func (f *backupToolInformer) Lister() v1alpha1.BackupToolLister { - return v1alpha1.NewBackupToolLister(f.Informer().GetIndexer()) +func (f *actionSetInformer) Lister() v1alpha1.ActionSetLister { + return v1alpha1.NewActionSetLister(f.Informer().GetIndexer()) } diff --git a/pkg/client/informers/externalversions/dataprotection/v1alpha1/backupschedule.go b/pkg/client/informers/externalversions/dataprotection/v1alpha1/backupschedule.go new file mode 100644 index 00000000000..492f03c4fc3 --- /dev/null +++ b/pkg/client/informers/externalversions/dataprotection/v1alpha1/backupschedule.go @@ -0,0 +1,90 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + versioned "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned" + internalinterfaces "github.com/apecloud/kubeblocks/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/apecloud/kubeblocks/pkg/client/listers/dataprotection/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupScheduleInformer provides access to a shared informer and lister for +// BackupSchedules. +type BackupScheduleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.BackupScheduleLister +} + +type backupScheduleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupScheduleInformer constructs a new informer for BackupSchedule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupScheduleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupScheduleInformer constructs a new informer for BackupSchedule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBackupScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DataprotectionV1alpha1().BackupSchedules(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DataprotectionV1alpha1().BackupSchedules(namespace).Watch(context.TODO(), options) + }, + }, + &dataprotectionv1alpha1.BackupSchedule{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupScheduleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupScheduleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupScheduleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&dataprotectionv1alpha1.BackupSchedule{}, f.defaultInformer) +} + +func (f *backupScheduleInformer) Lister() v1alpha1.BackupScheduleLister { + return v1alpha1.NewBackupScheduleLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/dataprotection/v1alpha1/interface.go b/pkg/client/informers/externalversions/dataprotection/v1alpha1/interface.go index 32c098e82d2..54fa5e1423a 100644 --- a/pkg/client/informers/externalversions/dataprotection/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/dataprotection/v1alpha1/interface.go @@ -24,16 +24,18 @@ import ( // Interface provides access to all the informers in this group version. 
type Interface interface { + // ActionSets returns a ActionSetInformer. + ActionSets() ActionSetInformer // Backups returns a BackupInformer. Backups() BackupInformer // BackupPolicies returns a BackupPolicyInformer. BackupPolicies() BackupPolicyInformer // BackupRepos returns a BackupRepoInformer. BackupRepos() BackupRepoInformer - // BackupTools returns a BackupToolInformer. - BackupTools() BackupToolInformer - // RestoreJobs returns a RestoreJobInformer. - RestoreJobs() RestoreJobInformer + // BackupSchedules returns a BackupScheduleInformer. + BackupSchedules() BackupScheduleInformer + // Restores returns a RestoreInformer. + Restores() RestoreInformer } type version struct { @@ -47,6 +49,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ActionSets returns a ActionSetInformer. +func (v *version) ActionSets() ActionSetInformer { + return &actionSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Backups returns a BackupInformer. func (v *version) Backups() BackupInformer { return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -62,12 +69,12 @@ func (v *version) BackupRepos() BackupRepoInformer { return &backupRepoInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } -// BackupTools returns a BackupToolInformer. -func (v *version) BackupTools() BackupToolInformer { - return &backupToolInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +// BackupSchedules returns a BackupScheduleInformer. +func (v *version) BackupSchedules() BackupScheduleInformer { + return &backupScheduleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// RestoreJobs returns a RestoreJobInformer. 
-func (v *version) RestoreJobs() RestoreJobInformer { - return &restoreJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// Restores returns a RestoreInformer. +func (v *version) Restores() RestoreInformer { + return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/pkg/client/informers/externalversions/dataprotection/v1alpha1/restorejob.go b/pkg/client/informers/externalversions/dataprotection/v1alpha1/restore.go similarity index 57% rename from pkg/client/informers/externalversions/dataprotection/v1alpha1/restorejob.go rename to pkg/client/informers/externalversions/dataprotection/v1alpha1/restore.go index 3a339184313..fe0e68f77c7 100644 --- a/pkg/client/informers/externalversions/dataprotection/v1alpha1/restorejob.go +++ b/pkg/client/informers/externalversions/dataprotection/v1alpha1/restore.go @@ -32,59 +32,59 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// RestoreJobInformer provides access to a shared informer and lister for -// RestoreJobs. -type RestoreJobInformer interface { +// RestoreInformer provides access to a shared informer and lister for +// Restores. +type RestoreInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RestoreJobLister + Lister() v1alpha1.RestoreLister } -type restoreJobInformer struct { +type restoreInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewRestoreJobInformer constructs a new informer for RestoreJob type. +// NewRestoreInformer constructs a new informer for Restore type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewRestoreJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredRestoreJobInformer(client, namespace, resyncPeriod, indexers, nil) +func NewRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredRestoreJobInformer constructs a new informer for RestoreJob type. +// NewFilteredRestoreInformer constructs a new informer for Restore type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredRestoreJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DataprotectionV1alpha1().RestoreJobs(namespace).List(context.TODO(), options) + return client.DataprotectionV1alpha1().Restores(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DataprotectionV1alpha1().RestoreJobs(namespace).Watch(context.TODO(), options) + return client.DataprotectionV1alpha1().Restores(namespace).Watch(context.TODO(), options) }, }, - &dataprotectionv1alpha1.RestoreJob{}, + 
&dataprotectionv1alpha1.Restore{}, resyncPeriod, indexers, ) } -func (f *restoreJobInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredRestoreJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *restoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *restoreJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&dataprotectionv1alpha1.RestoreJob{}, f.defaultInformer) +func (f *restoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&dataprotectionv1alpha1.Restore{}, f.defaultInformer) } -func (f *restoreJobInformer) Lister() v1alpha1.RestoreJobLister { - return v1alpha1.NewRestoreJobLister(f.Informer().GetIndexer()) +func (f *restoreInformer) Lister() v1alpha1.RestoreLister { + return v1alpha1.NewRestoreLister(f.Informer().GetIndexer()) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index c18f12b4fc1..3d50c4b4179 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -77,16 +77,18 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1alpha1().ServiceDescriptors().Informer()}, nil // Group=dataprotection.kubeblocks.io, Version=v1alpha1 + case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("actionsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().ActionSets().Informer()}, nil case 
dataprotectionv1alpha1.SchemeGroupVersion.WithResource("backups"): return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().Backups().Informer()}, nil case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("backuppolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().BackupPolicies().Informer()}, nil case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("backuprepos"): return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().BackupRepos().Informer()}, nil - case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("backuptools"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().BackupTools().Informer()}, nil - case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("restorejobs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().RestoreJobs().Informer()}, nil + case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("backupschedules"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().BackupSchedules().Informer()}, nil + case dataprotectionv1alpha1.SchemeGroupVersion.WithResource("restores"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Dataprotection().V1alpha1().Restores().Informer()}, nil // Group=extensions.kubeblocks.io, Version=v1alpha1 case extensionsv1alpha1.SchemeGroupVersion.WithResource("addons"): diff --git a/pkg/client/listers/dataprotection/v1alpha1/backuptool.go b/pkg/client/listers/dataprotection/v1alpha1/actionset.go similarity index 53% rename from pkg/client/listers/dataprotection/v1alpha1/backuptool.go rename to pkg/client/listers/dataprotection/v1alpha1/actionset.go index 0fbe6df4f23..c66aecad27d 100644 --- a/pkg/client/listers/dataprotection/v1alpha1/backuptool.go +++ 
b/pkg/client/listers/dataprotection/v1alpha1/actionset.go @@ -25,44 +25,44 @@ import ( "k8s.io/client-go/tools/cache" ) -// BackupToolLister helps list BackupTools. +// ActionSetLister helps list ActionSets. // All objects returned here must be treated as read-only. -type BackupToolLister interface { - // List lists all BackupTools in the indexer. +type ActionSetLister interface { + // List lists all ActionSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.BackupTool, err error) - // Get retrieves the BackupTool from the index for a given name. + List(selector labels.Selector) (ret []*v1alpha1.ActionSet, err error) + // Get retrieves the ActionSet from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.BackupTool, error) - BackupToolListerExpansion + Get(name string) (*v1alpha1.ActionSet, error) + ActionSetListerExpansion } -// backupToolLister implements the BackupToolLister interface. -type backupToolLister struct { +// actionSetLister implements the ActionSetLister interface. +type actionSetLister struct { indexer cache.Indexer } -// NewBackupToolLister returns a new BackupToolLister. -func NewBackupToolLister(indexer cache.Indexer) BackupToolLister { - return &backupToolLister{indexer: indexer} +// NewActionSetLister returns a new ActionSetLister. +func NewActionSetLister(indexer cache.Indexer) ActionSetLister { + return &actionSetLister{indexer: indexer} } -// List lists all BackupTools in the indexer. -func (s *backupToolLister) List(selector labels.Selector) (ret []*v1alpha1.BackupTool, err error) { +// List lists all ActionSets in the indexer. 
+func (s *actionSetLister) List(selector labels.Selector) (ret []*v1alpha1.ActionSet, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.BackupTool)) + ret = append(ret, m.(*v1alpha1.ActionSet)) }) return ret, err } -// Get retrieves the BackupTool from the index for a given name. -func (s *backupToolLister) Get(name string) (*v1alpha1.BackupTool, error) { +// Get retrieves the ActionSet from the index for a given name. +func (s *actionSetLister) Get(name string) (*v1alpha1.ActionSet, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("backuptool"), name) + return nil, errors.NewNotFound(v1alpha1.Resource("actionset"), name) } - return obj.(*v1alpha1.BackupTool), nil + return obj.(*v1alpha1.ActionSet), nil } diff --git a/pkg/client/listers/dataprotection/v1alpha1/backupschedule.go b/pkg/client/listers/dataprotection/v1alpha1/backupschedule.go new file mode 100644 index 00000000000..21f2fdd1a8b --- /dev/null +++ b/pkg/client/listers/dataprotection/v1alpha1/backupschedule.go @@ -0,0 +1,99 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupScheduleLister helps list BackupSchedules. +// All objects returned here must be treated as read-only. +type BackupScheduleLister interface { + // List lists all BackupSchedules in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.BackupSchedule, err error) + // BackupSchedules returns an object that can list and get BackupSchedules. + BackupSchedules(namespace string) BackupScheduleNamespaceLister + BackupScheduleListerExpansion +} + +// backupScheduleLister implements the BackupScheduleLister interface. +type backupScheduleLister struct { + indexer cache.Indexer +} + +// NewBackupScheduleLister returns a new BackupScheduleLister. +func NewBackupScheduleLister(indexer cache.Indexer) BackupScheduleLister { + return &backupScheduleLister{indexer: indexer} +} + +// List lists all BackupSchedules in the indexer. +func (s *backupScheduleLister) List(selector labels.Selector) (ret []*v1alpha1.BackupSchedule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.BackupSchedule)) + }) + return ret, err +} + +// BackupSchedules returns an object that can list and get BackupSchedules. +func (s *backupScheduleLister) BackupSchedules(namespace string) BackupScheduleNamespaceLister { + return backupScheduleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupScheduleNamespaceLister helps list and get BackupSchedules. +// All objects returned here must be treated as read-only. +type BackupScheduleNamespaceLister interface { + // List lists all BackupSchedules in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.BackupSchedule, err error) + // Get retrieves the BackupSchedule from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.BackupSchedule, error) + BackupScheduleNamespaceListerExpansion +} + +// backupScheduleNamespaceLister implements the BackupScheduleNamespaceLister +// interface. +type backupScheduleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BackupSchedules in the indexer for a given namespace. +func (s backupScheduleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.BackupSchedule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.BackupSchedule)) + }) + return ret, err +} + +// Get retrieves the BackupSchedule from the indexer for a given namespace and name. +func (s backupScheduleNamespaceLister) Get(name string) (*v1alpha1.BackupSchedule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("backupschedule"), name) + } + return obj.(*v1alpha1.BackupSchedule), nil +} diff --git a/pkg/client/listers/dataprotection/v1alpha1/expansion_generated.go b/pkg/client/listers/dataprotection/v1alpha1/expansion_generated.go index e9283c098cb..8e7bab6e1c3 100644 --- a/pkg/client/listers/dataprotection/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/dataprotection/v1alpha1/expansion_generated.go @@ -18,6 +18,10 @@ limitations under the License. package v1alpha1 +// ActionSetListerExpansion allows custom methods to be added to +// ActionSetLister. +type ActionSetListerExpansion interface{} + // BackupListerExpansion allows custom methods to be added to // BackupLister. 
type BackupListerExpansion interface{} @@ -38,14 +42,18 @@ type BackupPolicyNamespaceListerExpansion interface{} // BackupRepoLister. type BackupRepoListerExpansion interface{} -// BackupToolListerExpansion allows custom methods to be added to -// BackupToolLister. -type BackupToolListerExpansion interface{} +// BackupScheduleListerExpansion allows custom methods to be added to +// BackupScheduleLister. +type BackupScheduleListerExpansion interface{} + +// BackupScheduleNamespaceListerExpansion allows custom methods to be added to +// BackupScheduleNamespaceLister. +type BackupScheduleNamespaceListerExpansion interface{} -// RestoreJobListerExpansion allows custom methods to be added to -// RestoreJobLister. -type RestoreJobListerExpansion interface{} +// RestoreListerExpansion allows custom methods to be added to +// RestoreLister. +type RestoreListerExpansion interface{} -// RestoreJobNamespaceListerExpansion allows custom methods to be added to -// RestoreJobNamespaceLister. -type RestoreJobNamespaceListerExpansion interface{} +// RestoreNamespaceListerExpansion allows custom methods to be added to +// RestoreNamespaceLister. +type RestoreNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/dataprotection/v1alpha1/restore.go b/pkg/client/listers/dataprotection/v1alpha1/restore.go new file mode 100644 index 00000000000..02979dd1108 --- /dev/null +++ b/pkg/client/listers/dataprotection/v1alpha1/restore.go @@ -0,0 +1,99 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RestoreLister helps list Restores. +// All objects returned here must be treated as read-only. +type RestoreLister interface { + // List lists all Restores in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.Restore, err error) + // Restores returns an object that can list and get Restores. + Restores(namespace string) RestoreNamespaceLister + RestoreListerExpansion +} + +// restoreLister implements the RestoreLister interface. +type restoreLister struct { + indexer cache.Indexer +} + +// NewRestoreLister returns a new RestoreLister. +func NewRestoreLister(indexer cache.Indexer) RestoreLister { + return &restoreLister{indexer: indexer} +} + +// List lists all Restores in the indexer. +func (s *restoreLister) List(selector labels.Selector) (ret []*v1alpha1.Restore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Restore)) + }) + return ret, err +} + +// Restores returns an object that can list and get Restores. +func (s *restoreLister) Restores(namespace string) RestoreNamespaceLister { + return restoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RestoreNamespaceLister helps list and get Restores. +// All objects returned here must be treated as read-only. +type RestoreNamespaceLister interface { + // List lists all Restores in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.Restore, err error) + // Get retrieves the Restore from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.Restore, error) + RestoreNamespaceListerExpansion +} + +// restoreNamespaceLister implements the RestoreNamespaceLister +// interface. +type restoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Restores in the indexer for a given namespace. +func (s restoreNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Restore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Restore)) + }) + return ret, err +} + +// Get retrieves the Restore from the indexer for a given namespace and name. +func (s restoreNamespaceLister) Get(name string) (*v1alpha1.Restore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("restore"), name) + } + return obj.(*v1alpha1.Restore), nil +} diff --git a/pkg/client/listers/dataprotection/v1alpha1/restorejob.go b/pkg/client/listers/dataprotection/v1alpha1/restorejob.go deleted file mode 100644 index fabe7031849..00000000000 --- a/pkg/client/listers/dataprotection/v1alpha1/restorejob.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// RestoreJobLister helps list RestoreJobs. -// All objects returned here must be treated as read-only. -type RestoreJobLister interface { - // List lists all RestoreJobs in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RestoreJob, err error) - // RestoreJobs returns an object that can list and get RestoreJobs. - RestoreJobs(namespace string) RestoreJobNamespaceLister - RestoreJobListerExpansion -} - -// restoreJobLister implements the RestoreJobLister interface. -type restoreJobLister struct { - indexer cache.Indexer -} - -// NewRestoreJobLister returns a new RestoreJobLister. -func NewRestoreJobLister(indexer cache.Indexer) RestoreJobLister { - return &restoreJobLister{indexer: indexer} -} - -// List lists all RestoreJobs in the indexer. -func (s *restoreJobLister) List(selector labels.Selector) (ret []*v1alpha1.RestoreJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RestoreJob)) - }) - return ret, err -} - -// RestoreJobs returns an object that can list and get RestoreJobs. -func (s *restoreJobLister) RestoreJobs(namespace string) RestoreJobNamespaceLister { - return restoreJobNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// RestoreJobNamespaceLister helps list and get RestoreJobs. -// All objects returned here must be treated as read-only. -type RestoreJobNamespaceLister interface { - // List lists all RestoreJobs in the indexer for a given namespace. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha1.RestoreJob, err error) - // Get retrieves the RestoreJob from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.RestoreJob, error) - RestoreJobNamespaceListerExpansion -} - -// restoreJobNamespaceLister implements the RestoreJobNamespaceLister -// interface. -type restoreJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RestoreJobs in the indexer for a given namespace. -func (s restoreJobNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RestoreJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RestoreJob)) - }) - return ret, err -} - -// Get retrieves the RestoreJob from the indexer for a given namespace and name. -func (s restoreJobNamespaceLister) Get(name string) (*v1alpha1.RestoreJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("restorejob"), name) - } - return obj.(*v1alpha1.RestoreJob), nil -} diff --git a/test/integration/backup_mysql_test.go b/test/integration/backup_mysql_test.go index 750394e3415..3d2dc5a82f0 100644 --- a/test/integration/backup_mysql_test.go +++ b/test/integration/backup_mysql_test.go @@ -30,6 +30,7 @@ import ( "github.com/apecloud/kubeblocks/internal/controller/component" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" ) var _ = Describe("MySQL data protection function", func() { @@ -63,8 +64,8 @@ var _ = Describe("MySQL data protection function", func() { testapps.ClearResources(&testCtx, intctrlutil.ConfigMapSignature, inNS, ml) 
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.BackupSignature, true, inNS) testapps.ClearResources(&testCtx, intctrlutil.BackupPolicySignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.BackupToolSignature, inNS, ml) - testapps.ClearResources(&testCtx, intctrlutil.RestoreJobSignature, inNS, ml) + testapps.ClearResources(&testCtx, intctrlutil.ActionSetSignature, inNS, ml) + testapps.ClearResources(&testCtx, intctrlutil.RestoreSignature, inNS, ml) } @@ -100,7 +101,6 @@ var _ = Describe("MySQL data protection function", func() { Create(&testCtx).GetObject() By("Create a cluster obj") - pvcSpec := testapps.NewPVCSpec("1Gi") clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterNamePrefix, clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). @@ -118,18 +118,17 @@ var _ = Describe("MySQL data protection function", func() { } createBackupObj := func() { - By("By creating a backupTool") - backupTool := testapps.CreateCustomizedObj(&testCtx, "backup/backuptool.yaml", - &dpv1alpha1.BackupTool{}, testapps.RandomizedObjName()) + By("By creating a actionSet") + actionSet := testapps.CreateCustomizedObj(&testCtx, "backup/actionset.yaml", + &dpv1alpha1.ActionSet{}, testapps.RandomizedObjName()) By("By creating a backupPolicy from backupPolicyTemplate: " + backupPolicyTemplateName) - backupPolicyObj := testapps.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). + backupPolicyObj := testdp.NewBackupPolicyFactory(testCtx.DefaultNamespace, backupPolicyName). WithRandomName(). - AddDataFilePolicy(). - SetBackupToolName(backupTool.Name). - AddMatchLabels(constant.AppInstanceLabelKey, clusterKey.Name). - SetTargetSecretName(component.GenerateConnCredential(clusterKey.Name)). - SetPVC(backupRemotePVCName). + SetTarget(constant.AppInstanceLabelKey, clusterKey.Name). + SetTargetConnectionCredential(component.GenerateConnCredential(clusterKey.Name)). 
+ AddBackupMethod(testdp.BackupMethodName, false, actionSet.Name). + SetBackupMethodVolumeMounts(testapps.DataVolumeName, "/data"). Create(&testCtx).GetObject() backupPolicyKey := client.ObjectKeyFromObject(backupPolicyObj) @@ -142,14 +141,14 @@ var _ = Describe("MySQL data protection function", func() { By("By check backupPolicy available") Eventually(testapps.CheckObj(&testCtx, backupPolicyKey, func(g Gomega, backupPolicy *dpv1alpha1.BackupPolicy) { - g.Expect(backupPolicy.Status.Phase).To(Equal(dpv1alpha1.PolicyAvailable)) + g.Expect(backupPolicy.Status.Phase).To(Equal(dpv1alpha1.BackupPolicyAvailable)) })).Should(Succeed()) By("By creating a backup from backupPolicy: " + backupPolicyKey.Name) - backup := testapps.NewBackupFactory(testCtx.DefaultNamespace, backupName). + backup := testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName). WithRandomName(). SetBackupPolicyName(backupPolicyKey.Name). - SetBackupType(dpv1alpha1.BackupTypeDataFile). + SetBackupMethod(testdp.BackupMethodName). 
Create(&testCtx).GetObject() backupKey = client.ObjectKeyFromObject(backup) } @@ -162,7 +161,7 @@ var _ = Describe("MySQL data protection function", func() { It("should be completed", func() { Eventually(testapps.CheckObj(&testCtx, backupKey, func(g Gomega, backup *dpv1alpha1.Backup) { - g.Expect(backup.Status.Phase).To(Equal(dpv1alpha1.BackupCompleted)) + g.Expect(backup.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) })).Should(Succeed()) }) }) diff --git a/test/integration/controller_suite_test.go b/test/integration/controller_suite_test.go index 1afd079d2f0..65ca963f1c8 100644 --- a/test/integration/controller_suite_test.go +++ b/test/integration/controller_suite_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/controllers/apps" dpctrl "github.com/apecloud/kubeblocks/controllers/dataprotection" "github.com/apecloud/kubeblocks/controllers/k8score" @@ -256,7 +256,7 @@ var _ = BeforeSuite(func() { err = appsv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = dataprotectionv1alpha1.AddToScheme(scheme.Scheme) + err = dpv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = snapshotv1.AddToScheme(scheme.Scheme) @@ -336,33 +336,33 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&dpctrl.BackupPolicyReconciler{ + err = (&dpctrl.BackupScheduleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), Recorder: k8sManager.GetEventRecorderFor("backup-policy-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&dpctrl.BackupToolReconciler{ + err = (&dpctrl.ActionSetReconciler{ Client: k8sManager.GetClient(), Scheme: 
k8sManager.GetScheme(), Recorder: k8sManager.GetEventRecorderFor("backup-tool-controller"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&dpctrl.RestoreJobReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("restore-job-controller"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&dpctrl.CronJobReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("cronjob-controller"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) + // err = (&dpctrl.RestoreJobReconciler{ + // Client: k8sManager.GetClient(), + // Scheme: k8sManager.GetScheme(), + // Recorder: k8sManager.GetEventRecorderFor("restore-job-controller"), + // }).SetupWithManager(k8sManager) + // Expect(err).ToNot(HaveOccurred()) + + // err = (&dpctrl.CronJobReconciler{ + // Client: k8sManager.GetClient(), + // Scheme: k8sManager.GetScheme(), + // Recorder: k8sManager.GetEventRecorderFor("cronjob-controller"), + // }).SetupWithManager(k8sManager) + // Expect(err).ToNot(HaveOccurred()) // pulling docker images is slow viper.SetDefault("EventuallyTimeout", time.Second*300) diff --git a/test/testdata/backup/actionset.yaml b/test/testdata/backup/actionset.yaml new file mode 100644 index 00000000000..3f9ec0602ca --- /dev/null +++ b/test/testdata/backup/actionset.yaml @@ -0,0 +1,36 @@ +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: ActionSet +metadata: + name: action-set- +spec: + backupType: Full + env: + - name: DATA_DIR + value: /var/lib/mysql + backup: + backupData: + image: registry.cn-hangzhou.aliyuncs.com/apecloud/percona-xtrabackup + runOnTargetPodNode: true + command: + - sh + - -c + - echo "DB_HOST=${DP_DB_HOST} DB_USER=${DP_DB_USER} DB_PASSWORD=${DP_DB_PASSWORD} DATA_DIR=${DATA_DIR} BACKUP_DIR=${DP_BACKUP_DIR} BACKUP_NAME=${DP_BACKUP_NAME}"; + mkdir -p /${BACKUP_DIR}; + xtrabackup 
--compress --backup --safe-slave-backup --slave-info --stream=xbstream --host=${DP_DB_HOST} \ + --user=${DP_DB_USER} --password=${DP_DB_PASSWORD} --datadir=${DATA_DIR} > /${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream + restore: + prepareData: + image: registry.cn-hangzhou.aliyuncs.com/apecloud/percona-xtrabackup + command: + - sh + - -c + - | + echo "BACKUP_DIR=${DP_BACKUP_DIR} BACKUP_NAME=${DP_BACKUP_NAME} DATA_DIR=${DATA_DIR}" && \ + mkdir -p /tmp/data/ && cd /tmp/data \ + && xbstream -x < /${DP_BACKUP_DIR}/${DP_BACKUP_NAME}.xbstream \ + && xtrabackup --decompress --target-dir=/tmp/data/ \ + && find . -name "*.qp"|xargs rm -f \ + && rm -rf ${DATA_DIR}/* \ + && rsync -avrP /tmp/data/ ${DATA_DIR}/ \ + && rm -rf /tmp/data/ \ + && chmod -R 0777 ${DATA_DIR} \ No newline at end of file diff --git a/test/testdata/backup/backuptool.yaml b/test/testdata/backup/backuptool.yaml deleted file mode 100644 index 4edaae0eda3..00000000000 --- a/test/testdata/backup/backuptool.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupTool -metadata: - name: backup-tool- -spec: - image: registry.cn-hangzhou.aliyuncs.com/apecloud/percona-xtrabackup - deployKind: job - env: - - name: DATA_DIR - value: /var/lib/mysql - physical: - restoreCommands: - - sh - - -c - - | - echo "BACKUP_DIR=${BACKUP_DIR} BACKUP_NAME=${BACKUP_NAME} DATA_DIR=${DATA_DIR}" && \ - mkdir -p /tmp/data/ && cd /tmp/data \ - && xbstream -x < /${BACKUP_DIR}/${BACKUP_NAME}.xbstream \ - && xtrabackup --decompress --target-dir=/tmp/data/ \ - && find . 
-name "*.qp"|xargs rm -f \ - && rm -rf ${DATA_DIR}/* \ - && rsync -avrP /tmp/data/ ${DATA_DIR}/ \ - && rm -rf /tmp/data/ \ - && chmod -R 0777 ${DATA_DIR} - incrementalRestoreCommands: [] - logical: - restoreCommands: [] - incrementalRestoreCommands: [] - backupCommands: - - sh - - -c - - echo "DB_HOST=${DB_HOST} DB_USER=${DB_USER} DB_PASSWORD=${DB_PASSWORD} DATA_DIR=${DATA_DIR} BACKUP_DIR=${BACKUP_DIR} BACKUP_NAME=${BACKUP_NAME}"; - mkdir -p /${BACKUP_DIR}; - xtrabackup --compress --backup --safe-slave-backup --slave-info --stream=xbstream --host=${DB_HOST} --user=${DB_USER} --password=${DB_PASSWORD} --datadir=${DATA_DIR} > /${BACKUP_DIR}/${BACKUP_NAME}.xbstream - incrementalBackupCommands: [] \ No newline at end of file From ef5b0dd6aff0559451958bc57e4e961bf28c151f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 11:57:36 +0800 Subject: [PATCH 53/58] chore(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#5303) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: L.Dongming --- go.mod | 8 +++----- go.sum | 20 ++++++-------------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index c48b0cc0ded..f72bb23a94e 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/jackc/pgx/v5 v5.4.3 github.com/jedib0t/go-pretty/v6 v6.4.6 github.com/json-iterator/go v1.1.12 - github.com/k3d-io/k3d/v5 v5.5.2 + github.com/k3d-io/k3d/v5 v5.6.0 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 github.com/kubesphere/kubekey/v3 v3.0.7 @@ -164,7 +164,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect - 
github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect @@ -387,8 +387,7 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect + go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/term v0.12.0 // indirect @@ -407,7 +406,6 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect k8s.io/apiserver v0.28.1 // indirect k8s.io/component-helpers v0.28.2 // indirect oras.land/oras-go v1.2.4 // indirect diff --git a/go.sum b/go.sum index 17eaec0d452..8b6ca3de5fd 100644 --- a/go.sum +++ b/go.sum @@ -641,8 +641,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM= github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapr/kit v0.11.3 h1:u1X92tE8xsrwXIej7nkcI5Z1t1CFznPwlL18tizNEw4= @@ -699,7 +699,6 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= @@ -1195,8 +1194,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k3d-io/k3d/v5 v5.5.2 h1:VEkopEqTUBpGJghjltWqv1jI57MLKFaxWt2yBp2lZmE= -github.com/k3d-io/k3d/v5 v5.5.2/go.mod h1:PA0IkO8CB2OsBpBO3rJwskmA69Ibb9qdFiUGE/8IqUA= +github.com/k3d-io/k3d/v5 v5.6.0 h1:XMRSQXyPErOcDCdOJVi6HUPjJZuWd/N6Dss7QeCDRhk= +github.com/k3d-io/k3d/v5 v5.6.0/go.mod h1:t/hRD2heCSkO9TJJdzFT72jXGCY8PjsCsClgjcmMoAA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= 
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1844,13 +1843,8 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= -go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28 h1:zLxFnORHDFTSkJPawMU7LzsuGQJ4MUFS653jJHpORow= +go4.org/netipx v0.0.0-20230728184502-ec4c8b891b28/go.mod h1:TQvodOM+hJTioNQJilmLXu08JNb8i+ccq418+KWu1/Y= golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2546,8 +2540,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= From 4e66a362503c71d07993c58c3eaecfb15c2cecb6 Mon Sep 17 00:00:00 2001 From: xuriwuyun Date: Thu, 28 Sep 2023 13:43:47 +0800 Subject: [PATCH 54/58] fix: lorry leave member (#5246) --- lorry/binding/base.go | 14 ++++++++------ lorry/client/client.go | 4 ++-- lorry/component/dbmanager.go | 12 ++++++++++-- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/lorry/binding/base.go b/lorry/binding/base.go index c1d71efa3c7..672f8f36c36 100644 --- a/lorry/binding/base.go +++ b/lorry/binding/base.go @@ -429,10 +429,11 @@ func (ops *BaseOperations) SwitchoverOps(ctx context.Context, req *ProbeRequest, func (ops *BaseOperations) JoinMemberOps(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (OpsResult, error) { opsRes := OpsResult{} manager, err := component.GetDefaultManager() - if err != nil { - opsRes["event"] = OperationFailed + if manager == nil { + // manager for the DB is not supported, just return + opsRes["event"] = OperationSuccess opsRes["message"] = err.Error() - return opsRes, err + return opsRes, nil } dcsStore := dcs.GetStore() @@ -468,10 +469,11 @@ func (ops *BaseOperations) JoinMemberOps(ctx context.Context, req *ProbeRequest, func (ops *BaseOperations) LeaveMemberOps(ctx context.Context, req *ProbeRequest, resp *ProbeResponse) (OpsResult, error) { opsRes := OpsResult{} manager, err := 
component.GetDefaultManager() - if err != nil { - opsRes["event"] = OperationFailed + if manager == nil { + // manager for the DB is not supported, just return + opsRes["event"] = OperationSuccess opsRes["message"] = err.Error() - return opsRes, err + return opsRes, nil } dcsStore := dcs.GetStore() diff --git a/lorry/client/client.go b/lorry/client/client.go index a4b22558c74..ac4867398a1 100644 --- a/lorry/client/client.go +++ b/lorry/client/client.go @@ -39,7 +39,7 @@ import ( ) const ( - urlTemplate = "http://localhost:%d/v1.0/bindings/%s" + urlTemplate = "http://%s:%d/v1.0/bindings/%s" ) type Client interface { @@ -123,7 +123,7 @@ func NewClientWithPod(pod *corev1.Pod, characterType string) (*OperationClient, Client: client, Port: port, CharacterType: characterType, - URL: fmt.Sprintf(urlTemplate, port, characterType), + URL: fmt.Sprintf(urlTemplate, ip, port, characterType), CacheTTL: 60 * time.Second, RequestTimeout: 30 * time.Second, ReconcileTimeout: 500 * time.Millisecond, diff --git a/lorry/component/dbmanager.go b/lorry/component/dbmanager.go index 2ed4db855b9..9406e351388 100644 --- a/lorry/component/dbmanager.go +++ b/lorry/component/dbmanager.go @@ -222,6 +222,14 @@ func (mgr *DBManagerBase) ShutDownWithWait() { mgr.Logger.Info("Override me if need") } +func (*DBManagerBase) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error { + return nil +} + +func (*DBManagerBase) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error { + return nil +} + func RegisterManager(characterType, workloadType string, manager DBManager) { key := strings.ToLower(characterType + "_" + workloadType) managers[key] = manager @@ -306,11 +314,11 @@ func (*FakeManager) IsFirstMember() bool { } func (*FakeManager) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error { - return fmt.Errorf("NotSupported") + return nil } func (*FakeManager) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error { - return fmt.Errorf("NotSuppported") + 
return nil } func (*FakeManager) Promote(context.Context, *dcs.Cluster) error { From beca3452bc9a17179938cba6cd00ec6bf0662501 Mon Sep 17 00:00:00 2001 From: huangzhangshu <109708205+JashBook@users.noreply.github.com> Date: Thu, 28 Sep 2023 18:05:16 +0800 Subject: [PATCH 55/58] chore: COPY go. mod instead of bind (#5309) --- .github/workflows/cicd-pull-request.yml | 2 +- .github/workflows/cicd-push.yml | 2 +- .github/workflows/release-image.yml | 2 +- docker/Dockerfile | 7 +++---- docker/Dockerfile-dataprotection | 7 +++---- docker/Dockerfile-tools | 7 +++---- 6 files changed, 12 insertions(+), 15 deletions(-) diff --git a/.github/workflows/cicd-pull-request.yml b/.github/workflows/cicd-pull-request.yml index 65d981cd6ca..66bd6e4e9c3 100644 --- a/.github/workflows/cicd-pull-request.yml +++ b/.github/workflows/cicd-pull-request.yml @@ -156,7 +156,7 @@ jobs: if: contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') uses: apecloud/apecloud-cd/.github/workflows/release-image-check.yml@v0.1.24 with: - MAKE_OPS_PRE: "generate test-go-generate" + MAKE_OPS_PRE: "module generate test-go-generate" IMG: "apecloud/kubeblocks-tools" GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" diff --git a/.github/workflows/cicd-push.yml b/.github/workflows/cicd-push.yml index 86136657854..d682d5beb6d 100644 --- a/.github/workflows/cicd-push.yml +++ b/.github/workflows/cicd-push.yml @@ -205,7 +205,7 @@ jobs: if: ${{ contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') }} uses: apecloud/apecloud-cd/.github/workflows/release-image-check.yml@v0.1.24 with: - MAKE_OPS_PRE: "generate test-go-generate" + MAKE_OPS_PRE: "module generate test-go-generate" IMG: "apecloud/kubeblocks-tools" GO_VERSION: "1.21" BUILDX_PLATFORMS: "linux/amd64" diff --git a/.github/workflows/release-image.yml b/.github/workflows/release-image.yml index eab510cd9ff..00f1b4c62b2 100644 --- a/.github/workflows/release-image.yml +++ b/.github/workflows/release-image.yml @@ -54,7 +54,7 @@ jobs: needs: image-tag 
uses: apecloud/apecloud-cd/.github/workflows/release-image-cache.yml@v0.1.24 with: - MAKE_OPS_PRE: "generate test-go-generate" + MAKE_OPS_PRE: "module generate test-go-generate" IMG: "apecloud/kubeblocks-tools" VERSION: "${{ needs.image-tag.outputs.tag-name }}" GO_VERSION: "1.21" diff --git a/docker/Dockerfile b/docker/Dockerfile index dc92fe13020..7800c67aa68 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -26,12 +26,11 @@ ENV GOPROXY=${GOPROXY} WORKDIR /src # Copy the Go Modules manifests -#COPY go.mod go.mod -#COPY go.sum go.sum +COPY go.mod go.mod +COPY go.sum go.sum # cache deps before building and copying source so that we don't need to re-download as much # and so that source changes don't invalidate our downloaded layer -RUN --mount=type=bind,target=. \ - --mount=type=cache,target=/go/pkg/mod \ +RUN --mount=type=cache,target=/go/pkg/mod \ go mod download # Copy the go source diff --git a/docker/Dockerfile-dataprotection b/docker/Dockerfile-dataprotection index d571c22f008..45b6349d545 100644 --- a/docker/Dockerfile-dataprotection +++ b/docker/Dockerfile-dataprotection @@ -26,12 +26,11 @@ ENV GOPROXY=${GOPROXY} WORKDIR /src # Copy the Go Modules manifests -#COPY go.mod go.mod -#COPY go.sum go.sum +COPY go.mod go.mod +COPY go.sum go.sum # cache deps before building and copying source so that we don't need to re-download as much # and so that source changes don't invalidate our downloaded layer -RUN --mount=type=bind,target=. 
\ - --mount=type=cache,target=/go/pkg/mod \ +RUN --mount=type=cache,target=/go/pkg/mod \ go mod download # Copy the go source diff --git a/docker/Dockerfile-tools b/docker/Dockerfile-tools index 7110809899b..ea7f76feb7b 100644 --- a/docker/Dockerfile-tools +++ b/docker/Dockerfile-tools @@ -28,8 +28,8 @@ ENV GOPROXY=${GOPROXY} WORKDIR /src # Copy the Go Modules manifests -#COPY go.mod go.mod -#COPY go.sum go.sum +COPY go.mod go.mod +COPY go.sum go.sum # cache deps before building and copying source so that we don't need to re-download as much # and so that source changes don't invalidate our downloaded layer # RUN go mod download @@ -44,8 +44,7 @@ WORKDIR /src #COPY cmd/cli/ cmd/cli/ #COPY apis/ apis/ #COPY test/testdata/testdata.go test/testdata/testdata.go -RUN --mount=type=bind,target=. \ - --mount=type=cache,target=/go/pkg/mod \ +RUN --mount=type=cache,target=/go/pkg/mod \ go mod download # Build From 2536a1cf0d0e21985b5f8f2e2b2943af7d9dfb61 Mon Sep 17 00:00:00 2001 From: dingben Date: Sat, 30 Sep 2023 17:04:54 +0800 Subject: [PATCH 56/58] feat: cli plugin adapt the change of block index repository (#5305) --- internal/cli/cmd/plugin/types.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/cli/cmd/plugin/types.go b/internal/cli/cmd/plugin/types.go index e05767dc8e4..1480511352c 100644 --- a/internal/cli/cmd/plugin/types.go +++ b/internal/cli/cmd/plugin/types.go @@ -67,6 +67,9 @@ func (p *Paths) IndexPluginsPath(name string) []string { if _, err := os.Stat(filepath.Join(p.IndexPath(name), "krew-plugins")); err == nil { result = append(result, filepath.Join(p.IndexPath(name), "krew-plugins")) } + if _, err := os.Stat(filepath.Join(p.IndexPath(name), "cli-plugins")); err == nil { + result = append(result, filepath.Join(p.IndexPath(name), "cli-plugins")) + } return result } From bc7b19311318d8a5cf0b849b20e53b4812f73f0a Mon Sep 17 00:00:00 2001 From: a le <101848970+1aal@users.noreply.github.com> Date: Thu, 5 Oct 2023 18:19:50 +0800 Subject: [PATCH 
57/58] fix: supply the docker error info when playground init locally failed (#5315) --- internal/cli/util/version.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/internal/cli/util/version.go b/internal/cli/util/version.go index 9f2cd2ed4d8..ae24154ae4a 100644 --- a/internal/cli/util/version.go +++ b/internal/cli/util/version.go @@ -20,6 +20,7 @@ along with this program. If not, see . package util import ( + "bytes" "context" "fmt" "os/exec" @@ -128,9 +129,15 @@ func GetKubeBlocksDeploy(client kubernetes.Interface) (*appsv1.Deployment, error func GetDockerVersion() (*gv.Version, error) { // exec cmd to get output from docker info --format '{{.ServerVersion}}' cmd := exec.Command("docker", "info", "--format", "{{.ServerVersion}}") + var stderr bytes.Buffer + cmd.Stderr = &stderr out, err := cmd.Output() - if err != nil { - return nil, err + if err != nil || stderr.String() != "" { + errMsg := stderr.String() + if errMsg == "" { + errMsg = err.Error() + } + return nil, fmt.Errorf("failed to get the docker version by executing \"docker info --format {{.ServerVersion}}\": %s", errMsg) } return gv.NewVersion(strings.TrimSpace(string(out))) } From 1f87a871a8fefc06891fd22b68d029b85a443dd6 Mon Sep 17 00:00:00 2001 From: zhangtao <111836083+sophon-zt@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:47:38 -0500 Subject: [PATCH 58/58] chore: refine configuration reconcile and phase (#5311) --- apis/apps/v1alpha1/config.go | 3 +- .../apps.kubeblocks.io_configurations.yaml | 1 + .../configuration/configuration_controller.go | 62 ++++----- .../apps/configuration/reconcile_task.go | 77 ++++++---- controllers/apps/operations/reconfigure.go | 9 +- .../apps.kubeblocks.io_configurations.yaml | 1 + internal/controller/configuration/pipeline.go | 38 +---- internal/controllerutil/config_util.go | 50 +++++++ internal/controllerutil/config_util_test.go | 131 ++++++++++++++++++ 9 files changed, 271 insertions(+), 101 deletions(-) diff --git 
a/apis/apps/v1alpha1/config.go b/apis/apps/v1alpha1/config.go index bc91687dd17..c0cf6afbe1c 100644 --- a/apis/apps/v1alpha1/config.go +++ b/apis/apps/v1alpha1/config.go @@ -72,10 +72,11 @@ package v1alpha1 // ConfigurationPhase defines the Configuration FSM phase // +enum -// +kubebuilder:validation:Enum={Init,Running,Pending,Merged,MergeFailed,FailedAndPause,Upgrading,Deleting,FailedAndRetry,Finished} +// +kubebuilder:validation:Enum={Creating,Init,Running,Pending,Merged,MergeFailed,FailedAndPause,Upgrading,Deleting,FailedAndRetry,Finished} type ConfigurationPhase string const ( + CCreatingPhase ConfigurationPhase = "Creating" CInitPhase ConfigurationPhase = "Init" CRunningPhase ConfigurationPhase = "Running" CPendingPhase ConfigurationPhase = "Pending" diff --git a/config/crd/bases/apps.kubeblocks.io_configurations.yaml b/config/crd/bases/apps.kubeblocks.io_configurations.yaml index 627422ccfe9..7dbc26db996 100644 --- a/config/crd/bases/apps.kubeblocks.io_configurations.yaml +++ b/config/crd/bases/apps.kubeblocks.io_configurations.yaml @@ -310,6 +310,7 @@ spec: phase: description: phase is status of configurationItem. 
enum: + - Creating - Init - Running - Pending diff --git a/controllers/apps/configuration/configuration_controller.go b/controllers/apps/configuration/configuration_controller.go index 1d0881d8993..9208d02a498 100644 --- a/controllers/apps/configuration/configuration_controller.go +++ b/controllers/apps/configuration/configuration_controller.go @@ -107,7 +107,7 @@ func (r *ConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Reques return r.failWithInvalidComponent(configuration, reqCtx) } - if err := r.runTasks(reqCtx, configuration, fetcherTask, tasks); err != nil { + if err := r.runTasks(TaskContext{configuration, reqCtx, fetcherTask}, tasks); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "failed to run configuration reconcile task.") } if !isAllReady(configuration) { @@ -130,55 +130,52 @@ func (r *ConfigurationReconciler) failWithInvalidComponent(configuration *appsv1 func isAllReady(configuration *appsv1alpha1.Configuration) bool { for _, item := range configuration.Spec.ConfigItemDetails { itemStatus := configuration.Status.GetItemStatus(item.Name) - if itemStatus == nil || itemStatus.Phase != appsv1alpha1.CFinishedPhase { + if itemStatus != nil && !isFinishStatus(itemStatus.Phase) { return false } } return true } -func (r *ConfigurationReconciler) runTasks( - reqCtx intctrlutil.RequestCtx, - configuration *appsv1alpha1.Configuration, - fetcher *Task, - tasks []Task) (err error) { - var errs []error - var synthesizedComp *component.SynthesizedComponent - - synthesizedComp, err = component.BuildComponent(reqCtx, nil, - fetcher.ClusterObj, - fetcher.ClusterDefObj, - fetcher.ClusterDefComObj, - fetcher.ClusterComObj, +func (r *ConfigurationReconciler) runTasks(taskCtx TaskContext, tasks []Task) (err error) { + var ( + errs []error + synthesizedComp *component.SynthesizedComponent + + ctx = taskCtx.reqCtx.Ctx + configuration = taskCtx.configuration + ) + + synthesizedComp, err = component.BuildComponent(taskCtx.reqCtx, + 
nil, + taskCtx.fetcher.ClusterObj, + taskCtx.fetcher.ClusterDefObj, + taskCtx.fetcher.ClusterDefComObj, + taskCtx.fetcher.ClusterComObj, nil, - fetcher.ClusterVerComObj) + taskCtx.fetcher.ClusterVerComObj) if err != nil { return err } + // TODO manager multiple version patch := client.MergeFrom(configuration.DeepCopy()) revision := strconv.FormatInt(configuration.GetGeneration(), 10) for _, task := range tasks { - if err := task.Do(fetcher, synthesizedComp, revision); err != nil { + task.Status.UpdateRevision = revision + if err := task.Do(taskCtx.fetcher, synthesizedComp, revision); err != nil { task.Status.Phase = appsv1alpha1.CMergeFailedPhase task.Status.Message = cfgutil.ToPointer(err.Error()) errs = append(errs, err) continue } - task.Status.UpdateRevision = revision - task.Status.Phase = appsv1alpha1.CMergedPhase - if err := task.SyncStatus(fetcher, task.Status); err != nil { - task.Status.Phase = appsv1alpha1.CFailedPhase - task.Status.Message = cfgutil.ToPointer(err.Error()) - errs = append(errs, err) - } } configuration.Status.Message = "" if len(errs) > 0 { configuration.Status.Message = utilerrors.NewAggregate(errs).Error() } - if err := r.Client.Status().Patch(reqCtx.Ctx, configuration, patch); err != nil { + if err := r.Client.Status().Patch(ctx, configuration, patch); err != nil { errs = append(errs, err) } if len(errs) == 0 { @@ -216,12 +213,11 @@ func fromItemStatus(ctx intctrlutil.RequestCtx, status *appsv1alpha1.Configurati } func isReconcileStatus(phase appsv1alpha1.ConfigurationPhase) bool { - return phase == appsv1alpha1.CRunningPhase || - phase == appsv1alpha1.CInitPhase || - phase == appsv1alpha1.CPendingPhase || - phase == appsv1alpha1.CFailedPhase || - phase == appsv1alpha1.CMergedPhase || - phase == appsv1alpha1.CMergeFailedPhase || - phase == appsv1alpha1.CUpgradingPhase || - phase == appsv1alpha1.CFinishedPhase + return phase != "" && + phase != appsv1alpha1.CCreatingPhase && + phase != appsv1alpha1.CDeletingPhase +} + +func 
isFinishStatus(phase appsv1alpha1.ConfigurationPhase) bool { + return phase == appsv1alpha1.CFinishedPhase || phase == appsv1alpha1.CFailedAndPausePhase } diff --git a/controllers/apps/configuration/reconcile_task.go b/controllers/apps/configuration/reconcile_task.go index 3f4454281b4..03f1510682a 100644 --- a/controllers/apps/configuration/reconcile_task.go +++ b/controllers/apps/configuration/reconcile_task.go @@ -22,6 +22,8 @@ package configuration import ( "strconv" + corev1 "k8s.io/api/core/v1" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/controller/component" @@ -35,46 +37,73 @@ type Task struct { Status *appsv1alpha1.ConfigurationItemDetailStatus Name string - Do func(fetcher *Task, component *component.SynthesizedComponent, revision string) error - SyncStatus func(fetcher *Task, status *appsv1alpha1.ConfigurationItemDetailStatus) error + Do func(fetcher *Task, component *component.SynthesizedComponent, revision string) error +} + +type TaskContext struct { + configuration *appsv1alpha1.Configuration + reqCtx intctrlutil.RequestCtx + fetcher *Task } func NewTask(item appsv1alpha1.ConfigurationItemDetail, status *appsv1alpha1.ConfigurationItemDetailStatus) Task { return Task{ - Name: item.Name, - Status: status, + Name: item.Name, Do: func(fetcher *Task, synComponent *component.SynthesizedComponent, revision string) error { configSpec := item.ConfigSpec if configSpec == nil { return core.MakeError("not found config spec: %s", item.Name) } - reconcileTask := configuration.NewReconcilePipeline(configuration.ReconcileCtx{ - ResourceCtx: fetcher.ResourceCtx, - Cluster: fetcher.ClusterObj, - ClusterVer: fetcher.ClusterVerObj, - Component: synComponent, - PodSpec: synComponent.PodSpec, - }, item, status, configSpec) - return reconcileTask.ConfigMap(item.Name). - ConfigConstraints(configSpec.ConfigConstraintRef). - PrepareForTemplate(). 
- RerenderTemplate(). - ApplyParameters(). - UpdateConfigVersion(revision). - Sync(). - Complete() + if err := fetcher.ConfigMap(item.Name).Complete(); err != nil { + return err + } + // Do reconcile for config template + configMap := fetcher.ConfigMapObj + switch intctrlutil.GetConfigSpecReconcilePhase(configMap, item, status) { + default: + return syncStatus(configMap, status) + case appsv1alpha1.CPendingPhase, + appsv1alpha1.CMergeFailedPhase: + return syncImpl(fetcher, item, status, synComponent, revision, configSpec) + case appsv1alpha1.CCreatingPhase: + return nil + } }, - SyncStatus: syncStatus, + Status: status, } } -func syncStatus(fetcher *Task, status *appsv1alpha1.ConfigurationItemDetailStatus) (err error) { - err = fetcher.ConfigMap(status.Name).Complete() +func syncImpl(fetcher *Task, + item appsv1alpha1.ConfigurationItemDetail, + status *appsv1alpha1.ConfigurationItemDetailStatus, + component *component.SynthesizedComponent, + revision string, + configSpec *appsv1alpha1.ComponentConfigSpec) (err error) { + err = configuration.NewReconcilePipeline(configuration.ReconcileCtx{ + ResourceCtx: fetcher.ResourceCtx, + Cluster: fetcher.ClusterObj, + ClusterVer: fetcher.ClusterVerObj, + Component: component, + PodSpec: component.PodSpec, + }, item, status, configSpec). + ConfigMap(item.Name). + ConfigConstraints(configSpec.ConfigConstraintRef). + PrepareForTemplate(). + RerenderTemplate(). + ApplyParameters(). + UpdateConfigVersion(revision). + Sync(). 
+ Complete() if err != nil { - return + status.Phase = appsv1alpha1.CMergeFailedPhase + } else { + status.Phase = appsv1alpha1.CMergedPhase } + return +} - annotations := fetcher.ConfigMapObj.GetAnnotations() +func syncStatus(configMap *corev1.ConfigMap, status *appsv1alpha1.ConfigurationItemDetailStatus) (err error) { + annotations := configMap.GetAnnotations() // status.CurrentRevision = GetCurrentRevision(annotations) revisions := RetrieveRevision(annotations) if len(revisions) == 0 { diff --git a/controllers/apps/operations/reconfigure.go b/controllers/apps/operations/reconfigure.go index c0a978c6070..a88ea5c3872 100644 --- a/controllers/apps/operations/reconfigure.go +++ b/controllers/apps/operations/reconfigure.go @@ -28,7 +28,6 @@ import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" - "github.com/apecloud/kubeblocks/internal/controller/configuration" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) @@ -216,15 +215,11 @@ func (r *reconfigureAction) syncReconfigureOperatorStatus(ctx intctrlutil.Reques } item := fetcher.ConfigurationObj.Spec.GetConfigurationItem(configSpec.Name) - status := fetcher.ConfigurationObj.Status.GetItemStatus(configSpec.Name) - if status == nil || item == nil { + if item == nil { return appsv1alpha1.OpsRunningPhase, nil } - if !configuration.IsApplyConfigChanged(fetcher.ConfigMapObj, *item) { - return appsv1alpha1.OpsRunningPhase, nil - } - switch status.Phase { + switch intctrlutil.GetConfigSpecReconcilePhase(fetcher.ConfigMapObj, *item, fetcher.ConfigurationObj.Status.GetItemStatus(configSpec.Name)) { default: return appsv1alpha1.OpsRunningPhase, nil case appsv1alpha1.CFailedAndPausePhase: diff --git a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml index 627422ccfe9..7dbc26db996 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml +++ 
b/deploy/helm/crds/apps.kubeblocks.io_configurations.yaml @@ -310,6 +310,7 @@ spec: phase: description: phase is status of configurationItem. enum: + - Creating - Init - Running - Pending diff --git a/internal/controller/configuration/pipeline.go b/internal/controller/configuration/pipeline.go index 2a0e47e8035..cdc159c093b 100644 --- a/internal/controller/configuration/pipeline.go +++ b/internal/controller/configuration/pipeline.go @@ -20,8 +20,6 @@ along with this program. If not, see . package configuration import ( - "encoding/json" - "reflect" "strconv" corev1 "k8s.io/api/core/v1" @@ -257,7 +255,7 @@ func (p *updatePipeline) isDone() bool { func (p *updatePipeline) PrepareForTemplate() *updatePipeline { buildTemplate := func() (err error) { - p.reconcile = !IsApplyConfigChanged(p.ConfigMapObj, p.item) + p.reconcile = !intctrlutil.IsApplyConfigChanged(p.ConfigMapObj, p.item) if p.isDone() { return } @@ -272,23 +270,6 @@ func (p *updatePipeline) PrepareForTemplate() *updatePipeline { return p.Wrap(buildTemplate) } -func IsApplyConfigChanged(cm *corev1.ConfigMap, item appsv1alpha1.ConfigurationItemDetail) bool { - if cm == nil { - return false - } - - lastAppliedVersion, ok := cm.Annotations[constant.ConfigAppliedVersionAnnotationKey] - if !ok { - return false - } - var target appsv1alpha1.ConfigurationItemDetail - if err := json.Unmarshal([]byte(lastAppliedVersion), &target); err != nil { - return false - } - - return reflect.DeepEqual(target, item) -} - func (p *updatePipeline) ConfigSpec() *appsv1alpha1.ComponentConfigSpec { return p.configSpec } @@ -310,7 +291,7 @@ func (p *updatePipeline) RerenderTemplate() *updatePipeline { if p.isDone() { return } - if needRerender(p.ConfigMapObj, p.item) { + if intctrlutil.IsRerender(p.ConfigMapObj, p.item) { p.newCM, err = p.renderWrapper.rerenderConfigTemplate(p.ctx.Cluster, p.ctx.Component, *p.configSpec, &p.item) } else { p.newCM = p.ConfigMapObj.DeepCopy() @@ -409,18 +390,3 @@ func (p *updatePipeline) SyncStatus() 
*updatePipeline { return }) } - -func needRerender(obj *corev1.ConfigMap, item appsv1alpha1.ConfigurationItemDetail) bool { - if obj == nil { - return true - } - if item.Version == "" { - return false - } - - version, ok := obj.Annotations[constant.CMConfigurationTemplateVersion] - if !ok || version != item.Version { - return true - } - return false -} diff --git a/internal/controllerutil/config_util.go b/internal/controllerutil/config_util.go index c524365a3ac..0052dd066d5 100644 --- a/internal/controllerutil/config_util.go +++ b/internal/controllerutil/config_util.go @@ -21,6 +21,8 @@ package controllerutil import ( "context" + "encoding/json" + "reflect" "github.com/StudioSol/set" appsv1 "k8s.io/api/apps/v1" @@ -32,6 +34,7 @@ import ( "github.com/apecloud/kubeblocks/internal/configuration/core" "github.com/apecloud/kubeblocks/internal/configuration/util" "github.com/apecloud/kubeblocks/internal/configuration/validate" + "github.com/apecloud/kubeblocks/internal/constant" ) type ConfigEventContext struct { @@ -116,3 +119,50 @@ func fromUpdatedConfig(m map[string]string, sets *set.LinkedHashSetString) map[s } return r } + +// IsApplyConfigChanged checks if the configuration is changed +func IsApplyConfigChanged(configMap *corev1.ConfigMap, item v1alpha1.ConfigurationItemDetail) bool { + if configMap == nil { + return false + } + + lastAppliedVersion, ok := configMap.Annotations[constant.ConfigAppliedVersionAnnotationKey] + if !ok { + return false + } + var target v1alpha1.ConfigurationItemDetail + if err := json.Unmarshal([]byte(lastAppliedVersion), &target); err != nil { + return false + } + + return reflect.DeepEqual(target, item) +} + +// IsRerender checks if the configuration template is changed +func IsRerender(configMap *corev1.ConfigMap, item v1alpha1.ConfigurationItemDetail) bool { + if configMap == nil { + return true + } + if item.Version == "" { + return false + } + + version, ok := configMap.Annotations[constant.CMConfigurationTemplateVersion] + if !ok 
|| version != item.Version { + return true + } + return false +} + +// GetConfigSpecReconcilePhase gets the configuration phase +func GetConfigSpecReconcilePhase(configMap *corev1.ConfigMap, + item v1alpha1.ConfigurationItemDetail, + status *v1alpha1.ConfigurationItemDetailStatus) v1alpha1.ConfigurationPhase { + if status == nil || status.Phase == "" { + return v1alpha1.CCreatingPhase + } + if !IsApplyConfigChanged(configMap, item) { + return v1alpha1.CPendingPhase + } + return status.Phase +} diff --git a/internal/controllerutil/config_util_test.go b/internal/controllerutil/config_util_test.go index 23edc80ba64..7d91f33540d 100644 --- a/internal/controllerutil/config_util_test.go +++ b/internal/controllerutil/config_util_test.go @@ -28,10 +28,13 @@ import ( . "github.com/onsi/gomega" "github.com/StudioSol/set" + corev1 "k8s.io/api/core/v1" "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/configuration/core" cfgutil "github.com/apecloud/kubeblocks/internal/configuration/util" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" testutil "github.com/apecloud/kubeblocks/internal/testutil/k8s" "github.com/apecloud/kubeblocks/test/testdata" @@ -81,6 +84,134 @@ func TestFromUpdatedConfig(t *testing.T) { } } +func TestIsRerender(t *testing.T) { + type args struct { + cm *corev1.ConfigMap + item v1alpha1.ConfigurationItemDetail + } + tests := []struct { + name string + args args + want bool + }{{ + + name: "test", + args: args{ + cm: nil, + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + }, + }, + want: true, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test").GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + }, + }, + want: false, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test"). 
+ GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + Version: "v1", + }, + }, + want: true, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test"). + AddAnnotations(constant.CMConfigurationTemplateVersion, "v1"). + GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + Version: "v2", + }, + }, + want: true, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test"). + AddAnnotations(constant.CMConfigurationTemplateVersion, "v1"). + GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + Version: "v1", + }, + }, + want: false, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsRerender(tt.args.cm, tt.args.item); got != tt.want { + t.Errorf("IsRerender() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetConfigSpecReconcilePhase(t *testing.T) { + type args struct { + cm *corev1.ConfigMap + item v1alpha1.ConfigurationItemDetail + status *v1alpha1.ConfigurationItemDetailStatus + } + tests := []struct { + name string + args args + want v1alpha1.ConfigurationPhase + }{{ + name: "test", + args: args{ + cm: nil, + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + }, + }, + want: v1alpha1.CCreatingPhase, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test").GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + }, + status: &v1alpha1.ConfigurationItemDetailStatus{ + Phase: v1alpha1.CInitPhase, + }, + }, + want: v1alpha1.CPendingPhase, + }, { + name: "test", + args: args{ + cm: builder.NewConfigMapBuilder("default", "test"). + AddAnnotations(constant.ConfigAppliedVersionAnnotationKey, `{"name":"test"}`). 
+ GetObject(), + item: v1alpha1.ConfigurationItemDetail{ + Name: "test", + }, + status: &v1alpha1.ConfigurationItemDetailStatus{ + Phase: v1alpha1.CUpgradingPhase, + }, + }, + want: v1alpha1.CUpgradingPhase, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetConfigSpecReconcilePhase(tt.args.cm, tt.args.item, tt.args.status); got != tt.want { + t.Errorf("GetConfigSpecReconcilePhase() = %v, want %v", got, tt.want) + } + }) + } +} + var _ = Describe("config_util", func() { var k8sMockClient *testutil.K8sClientMockHelper