From af0789aee188f51bb791eccdca4532889660ad07 Mon Sep 17 00:00:00 2001 From: Leon Date: Mon, 30 Oct 2023 15:48:06 +0800 Subject: [PATCH 1/2] backup policy transformer for component --- controllers/apps/cluster_controller.go | 5 +- controllers/apps/component_controller.go | 33 +- .../apps/transformer_backup_policy_tpl.go | 584 ------------------ .../apps/transformer_component_backup.go | 576 +++++++++++++++++ .../transformer_component_backup_policy.go | 39 -- .../transformer_component_load_resources.go | 70 ++- .../apps/transformer_component_restore.go | 39 -- 7 files changed, 650 insertions(+), 696 deletions(-) delete mode 100644 controllers/apps/transformer_backup_policy_tpl.go create mode 100644 controllers/apps/transformer_component_backup.go delete mode 100644 controllers/apps/transformer_component_backup_policy.go delete mode 100644 controllers/apps/transformer_component_restore.go diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index 34609855ad3..ec06d9c5c57 100644 --- a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -140,13 +140,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct &ClusterServiceTransformer{}, // create default cluster connection credential secret object &ClusterCredentialTransformer{}, - // TODO(component): handle restore before ClusterComponentTransformer + // handle restore before ClusterComponentTransformer &ClusterRestoreTransformer{Client: r.Client}, // create all cluster components objects &ClusterComponentTransformer{Client: r.Client}, - // TODO(component): transform backupPolicyTemplate to backuppolicy.dataprotection.kubeblocks.io - // and backupschedule.dataprotection.kubeblocks.io - &BackupPolicyTplTransformer{}, // add our finalizer to all objects &ClusterOwnershipTransformer{}, // make all workload objects depending on credential secret diff --git a/controllers/apps/component_controller.go 
b/controllers/apps/component_controller.go index 0c3bf1e97ef..27a85929550 100644 --- a/controllers/apps/component_controller.go +++ b/controllers/apps/component_controller.go @@ -21,23 +21,23 @@ package apps import ( "context" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/constant" + "time" + corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "time" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) @@ -124,26 +124,25 @@ func (r *ComponentReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( AddTransformer( // handle component deletion first &componentDeletionTransformer{}, + // handle finalizers and referenced definition labels &componentMetaTransformer{}, - // validate referenced componentDefinition objects existence and availability, and build synthesized component - &componentLoadResourcesTransformer{}, - // do spec & definition consistency validation + // validate referenced componentDefinition objects, and build synthesized component + &componentLoadResourcesTransformer{Client: r.Client}, + // do validation for the spec & definition consistency &componentValidationTransformer{}, - // handle the component PDB + // handle component PDB &componentPDBTransformer{}, - // 
handle the component services + // handle component services &componentServiceTransformer{}, - // handle the connection credentials + // handle component connection credentials &componentCredentialTransformer{}, // handle tls volume and cert &componentTLSTransformer{}, - // render the component configurations + // render component configurations &componentConfigurationTransformer{Client: r.Client}, - // TODO(component): handle restore before component transformer - &componentRestoreTransformer{}, // handle the component workload &componentWorkloadTransformer{Client: r.Client}, - // TODO(component): transform backupPolicyTemplate to backuppolicy.dataprotection.kubeblocks.io and backupschedule.dataprotection.kubeblocks.io + // generate backuppolicy and backupschedule from backupPolicyTemplate &componentBackupPolicyTransformer{}, // handle RBAC for component workloads &componentRBACTransformer{}, diff --git a/controllers/apps/transformer_backup_policy_tpl.go b/controllers/apps/transformer_backup_policy_tpl.go deleted file mode 100644 index a4bf695386f..00000000000 --- a/controllers/apps/transformer_backup_policy_tpl.go +++ /dev/null @@ -1,584 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package apps - -import ( - "fmt" - - "golang.org/x/exp/slices" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" - dptypes "github.com/apecloud/kubeblocks/pkg/dataprotection/types" - dputils "github.com/apecloud/kubeblocks/pkg/dataprotection/utils" -) - -// BackupPolicyTplTransformer transforms the backup policy template to the data -// protection backup policy and backup schedule. -type BackupPolicyTplTransformer struct { - *clusterTransformContext - - tplCount int - tplIdentifier string - isDefaultTemplate string - - backupPolicyTpl *appsv1alpha1.BackupPolicyTemplate - backupPolicy *appsv1alpha1.BackupPolicy - compWorkloadType appsv1alpha1.WorkloadType -} - -var _ graph.Transformer = &BackupPolicyTplTransformer{} - -// Transform transforms the backup policy template to the backup policy and -// backup schedule. 
-func (r *BackupPolicyTplTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - r.clusterTransformContext = ctx.(*clusterTransformContext) - graphCli, _ := r.clusterTransformContext.Client.(model.GraphClient) - - clusterDefName := r.ClusterDef.Name - backupPolicyTpls := &appsv1alpha1.BackupPolicyTemplateList{} - if err := r.Client.List(r.Context, backupPolicyTpls, - client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { - return err - } - r.tplCount = len(backupPolicyTpls.Items) - if r.tplCount == 0 { - return nil - } - - backupPolicyNames := map[string]struct{}{} - backupScheduleNames := map[string]struct{}{} - for _, tpl := range backupPolicyTpls.Items { - r.isDefaultTemplate = tpl.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] - r.tplIdentifier = tpl.Spec.Identifier - r.backupPolicyTpl = &tpl - - for i, bp := range tpl.Spec.BackupPolicies { - compDef := r.ClusterDef.GetComponentDefByName(bp.ComponentDefRef) - if compDef == nil { - return intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", - bp.ComponentDefRef, clusterDefName) - } - - r.backupPolicy = &tpl.Spec.BackupPolicies[i] - r.compWorkloadType = compDef.WorkloadType - - transformBackupPolicy := func() *dpv1alpha1.BackupPolicy { - // build the data protection backup policy from the template. - dpBackupPolicy, action := r.transformBackupPolicy() - if dpBackupPolicy == nil { - return nil - } - - // if exist multiple backup policy templates and duplicate spec.identifier, - // the generated backupPolicy may have duplicate names, so it is - // necessary to check if it already exists. 
- if _, ok := backupPolicyNames[dpBackupPolicy.Name]; ok { - return dpBackupPolicy - } - - switch *action { - case model.CREATE: - graphCli.Create(dag, dpBackupPolicy) - case model.UPDATE: - graphCli.Update(dag, dpBackupPolicy, dpBackupPolicy) - } - backupPolicyNames[dpBackupPolicy.Name] = struct{}{} - return dpBackupPolicy - } - - transformBackupSchedule := func(backupPolicy *dpv1alpha1.BackupPolicy) { - // if backup policy is nil, it means that the backup policy template - // is invalid, backup schedule depends on backup policy, so we do - // not need to transform backup schedule. - if backupPolicy == nil { - return - } - - // only create backup schedule for the default backup policy template - // if there are more than one backup policy templates. - if r.isDefaultTemplate != trueVal && r.tplCount > 1 { - return - } - - // build the data protection backup schedule from the template. - dpBackupSchedule, action := r.transformBackupSchedule(backupPolicy) - - // merge cluster backup configuration into the backup schedule. - // If the backup schedule is nil, create a new backup schedule - // based on the cluster backup configuration. - if dpBackupSchedule == nil { - action = model.ActionCreatePtr() - } else if action == nil { - action = model.ActionUpdatePtr() - } - - // for a cluster, the default backup schedule is created by backup - // policy template, user can also configure cluster backup in the - // cluster custom object, such as enable cluster backup, set backup - // schedule, etc. - // We always prioritize the cluster backup configuration in the - // cluster object, so we need to merge the cluster backup configuration - // into the default backup schedule created by backup policy template - // if it exists. 
- dpBackupSchedule = r.mergeClusterBackup(backupPolicy, dpBackupSchedule) - if dpBackupSchedule == nil { - return - } - - // if exist multiple backup policy templates and duplicate spec.identifier, - // the backupPolicy that may be generated may have duplicate names, - // and it is necessary to check if it already exists. - if _, ok := backupScheduleNames[dpBackupSchedule.Name]; ok { - return - } - - switch *action { - case model.CREATE: - graphCli.Create(dag, dpBackupSchedule) - case model.UPDATE: - graphCli.Update(dag, dpBackupSchedule, dpBackupSchedule) - } - graphCli.DependOn(dag, backupPolicy, dpBackupSchedule) - backupScheduleNames[dpBackupSchedule.Name] = struct{}{} - } - - // transform backup policy template to data protection backupPolicy - // and backupSchedule - policy := transformBackupPolicy() - transformBackupSchedule(policy) - } - } - return nil -} - -// transformBackupPolicy transforms backup policy template to backup policy. -func (r *BackupPolicyTplTransformer) transformBackupPolicy() (*dpv1alpha1.BackupPolicy, *model.Action) { - cluster := r.OrigCluster - backupPolicyName := generateBackupPolicyName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) - backupPolicy := &dpv1alpha1.BackupPolicy{} - if err := r.Client.Get(r.Context, client.ObjectKey{ - Namespace: cluster.Namespace, - Name: backupPolicyName, - }, backupPolicy); client.IgnoreNotFound(err) != nil { - return nil, nil - } - - if len(backupPolicy.Name) == 0 { - // build a new backup policy by the backup policy template. 
- return r.buildBackupPolicy(backupPolicyName), model.ActionCreatePtr() - } - - // sync the existing backup policy with the cluster changes - r.syncBackupPolicy(backupPolicy) - return backupPolicy, model.ActionUpdatePtr() -} - -func (r *BackupPolicyTplTransformer) transformBackupSchedule( - backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupSchedule, *model.Action) { - cluster := r.OrigCluster - scheduleName := generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) - backupSchedule := &dpv1alpha1.BackupSchedule{} - if err := r.Client.Get(r.Context, client.ObjectKey{ - Namespace: cluster.Namespace, - Name: scheduleName, - }, backupSchedule); client.IgnoreNotFound(err) != nil { - return nil, nil - } - - if len(backupSchedule.Name) == 0 { - // build a new backup schedule from the backup policy template. - return r.buildBackupSchedule(scheduleName, backupPolicy), model.ActionCreatePtr() - } - // sync backup schedule - r.syncBackupSchedule(backupSchedule) - return backupSchedule, model.ActionUpdatePtr() -} - -func (r *BackupPolicyTplTransformer) buildBackupSchedule( - name string, - backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupSchedule { - cluster := r.OrigCluster - backupSchedule := &dpv1alpha1.BackupSchedule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: cluster.Namespace, - Labels: r.buildLabels(), - Annotations: r.buildAnnotations(), - }, - Spec: dpv1alpha1.BackupScheduleSpec{ - BackupPolicyName: backupPolicy.Name, - }, - } - - var schedules []dpv1alpha1.SchedulePolicy - for _, s := range r.backupPolicy.Schedules { - schedules = append(schedules, dpv1alpha1.SchedulePolicy{ - BackupMethod: s.BackupMethod, - CronExpression: s.CronExpression, - Enabled: s.Enabled, - RetentionPeriod: r.backupPolicy.RetentionPeriod, - }) - } - backupSchedule.Spec.Schedules = schedules - return backupSchedule -} - -func (r *BackupPolicyTplTransformer) syncBackupSchedule(backupSchedule *dpv1alpha1.BackupSchedule) { - 
scheduleMethodMap := map[string]struct{}{} - for _, s := range backupSchedule.Spec.Schedules { - scheduleMethodMap[s.BackupMethod] = struct{}{} - } - // sync the newly added schedule policies. - for _, s := range r.backupPolicy.Schedules { - if _, ok := scheduleMethodMap[s.BackupMethod]; ok { - continue - } - backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, dpv1alpha1.SchedulePolicy{ - BackupMethod: s.BackupMethod, - CronExpression: s.CronExpression, - Enabled: s.Enabled, - RetentionPeriod: r.backupPolicy.RetentionPeriod, - }) - } -} - -// syncBackupPolicy syncs labels and annotations of the backup policy with the cluster changes. -func (r *BackupPolicyTplTransformer) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPolicy) { - // update labels and annotations of the backup policy. - if backupPolicy.Annotations == nil { - backupPolicy.Annotations = map[string]string{} - } - if backupPolicy.Labels == nil { - backupPolicy.Labels = map[string]string{} - } - mergeMap(backupPolicy.Annotations, r.buildAnnotations()) - mergeMap(backupPolicy.Labels, r.buildLabels()) - - // update backup repo of the backup policy. - if r.Cluster.Spec.Backup != nil && r.Cluster.Spec.Backup.RepoName != "" { - backupPolicy.Spec.BackupRepoName = &r.Cluster.Spec.Backup.RepoName - } - - r.syncBackupMethods(backupPolicy) - - // only update the role labelSelector of the backup target instance when - // component workload is Replication/Consensus. Because the replicas of - // component will change, such as 2->1. then if the target role is 'follower' - // and replicas is 1, the target instance can not be found. so we sync the - // label selector automatically. - if !workloadHasRoleLabel(r.compWorkloadType) { - return - } - - comp := r.getClusterComponentSpec() - if comp == nil { - return - } - - // convert role labelSelector based on the replicas of the component automatically. - // TODO(ldm): need more review. 
- role := r.backupPolicy.Target.Role - if len(role) == 0 { - return - } - - podSelector := backupPolicy.Spec.Target.PodSelector - if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil { - podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} - } - if r.getCompReplicas() == 1 { - delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey) - } else { - podSelector.LabelSelector.MatchLabels[constant.RoleLabelKey] = role - } -} - -func (r *BackupPolicyTplTransformer) getCompReplicas() int32 { - rsm := &workloads.ReplicatedStateMachine{} - compSpec := r.getClusterComponentSpec() - rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) - if err := r.Client.Get(r.Context, client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { - return compSpec.Replicas - } - return *rsm.Spec.Replicas -} - -// buildBackupPolicy builds a new backup policy by the backup policy template. -func (r *BackupPolicyTplTransformer) buildBackupPolicy(backupPolicyName string) *dpv1alpha1.BackupPolicy { - comp := r.getClusterComponentSpec() - if comp == nil { - return nil - } - - cluster := r.OrigCluster - backupPolicy := &dpv1alpha1.BackupPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: backupPolicyName, - Namespace: cluster.Namespace, - Labels: r.buildLabels(), - Annotations: r.buildAnnotations(), - }, - } - r.syncBackupMethods(backupPolicy) - bpSpec := backupPolicy.Spec - // if cluster have backup repo, set backup repo name to backup policy. - if cluster.Spec.Backup != nil && cluster.Spec.Backup.RepoName != "" { - bpSpec.BackupRepoName = &cluster.Spec.Backup.RepoName - } - bpSpec.PathPrefix = buildBackupPathPrefix(cluster, comp.Name) - bpSpec.Target = r.buildBackupTarget(comp) - backupPolicy.Spec = bpSpec - return backupPolicy -} - -// syncBackupMethods syncs the backupMethod of tpl to backupPolicy. 
-func (r *BackupPolicyTplTransformer) syncBackupMethods(backupPolicy *dpv1alpha1.BackupPolicy) { - var backupMethods []dpv1alpha1.BackupMethod - for _, v := range r.backupPolicy.BackupMethods { - mappingEnv := r.doEnvMapping(v.EnvMapping) - v.BackupMethod.Env = dputils.MergeEnv(v.BackupMethod.Env, mappingEnv) - backupMethods = append(backupMethods, v.BackupMethod) - } - backupPolicy.Spec.BackupMethods = backupMethods -} - -func (r *BackupPolicyTplTransformer) doEnvMapping(envMapping []appsv1alpha1.EnvMappingVar) []corev1.EnvVar { - var env []corev1.EnvVar - for _, v := range envMapping { - for _, cv := range v.ValueFrom.ClusterVersionRef { - if !slices.Contains(cv.Names, r.Cluster.Spec.ClusterVersionRef) { - continue - } - env = append(env, corev1.EnvVar{ - Name: v.Key, - Value: cv.MappingValue, - }) - } - } - return env -} - -func (r *BackupPolicyTplTransformer) buildBackupTarget( - comp *appsv1alpha1.ClusterComponentSpec) *dpv1alpha1.BackupTarget { - targetTpl := r.backupPolicy.Target - clusterName := r.OrigCluster.Name - - getSAName := func() string { - if comp.ServiceAccountName != "" { - return comp.ServiceAccountName - } - return "kb-" + r.Cluster.Name - } - - // build the target connection credential - cc := dpv1alpha1.ConnectionCredential{} - if len(targetTpl.Account) > 0 { - cc.SecretName = fmt.Sprintf("%s-%s-%s", clusterName, comp.Name, targetTpl.Account) - cc.PasswordKey = constant.AccountPasswdForSecret - cc.PasswordKey = constant.AccountNameForSecret - } else { - cc.SecretName = constant.GenerateDefaultConnCredential(clusterName) - ccKey := targetTpl.ConnectionCredentialKey - if ccKey.PasswordKey != nil { - cc.PasswordKey = *ccKey.PasswordKey - } - if ccKey.UsernameKey != nil { - cc.UsernameKey = *ccKey.UsernameKey - } - if ccKey.PortKey != nil { - cc.PortKey = *ccKey.PortKey - } - if ccKey.HostKey != nil { - cc.HostKey = *ccKey.HostKey - } - } - - target := &dpv1alpha1.BackupTarget{ - PodSelector: &dpv1alpha1.PodSelector{ - Strategy: 
dpv1alpha1.PodSelectionStrategyAny, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: r.buildTargetPodLabels(comp), - }, - }, - ConnectionCredential: &cc, - ServiceAccountName: getSAName(), - } - return target -} - -func (r *BackupPolicyTplTransformer) mergeClusterBackup( - backupPolicy *dpv1alpha1.BackupPolicy, - backupSchedule *dpv1alpha1.BackupSchedule) *dpv1alpha1.BackupSchedule { - cluster := r.OrigCluster - backupEnabled := func() bool { - return cluster.Spec.Backup != nil && boolValue(cluster.Spec.Backup.Enabled) - } - - if backupPolicy == nil || cluster.Spec.Backup == nil { - // backup policy is nil, can not enable cluster backup, so record event and return. - if backupEnabled() { - r.EventRecorder.Event(r.Cluster, corev1.EventTypeWarning, - "BackupPolicyNotFound", "backup policy is nil, can not enable cluster backup") - } - return backupSchedule - } - - backup := cluster.Spec.Backup - // there is no backup schedule created by backup policy template, so we need to - // create a new backup schedule for cluster backup. - if backupSchedule == nil { - backupSchedule = &dpv1alpha1.BackupSchedule{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier), - Namespace: cluster.Namespace, - Labels: r.buildLabels(), - Annotations: r.buildAnnotations(), - }, - Spec: dpv1alpha1.BackupScheduleSpec{ - BackupPolicyName: backupPolicy.Name, - StartingDeadlineMinutes: backup.StartingDeadlineMinutes, - Schedules: []dpv1alpha1.SchedulePolicy{}, - }, - } - } - - // build backup schedule policy by cluster backup spec - sp := &dpv1alpha1.SchedulePolicy{ - Enabled: backup.Enabled, - RetentionPeriod: backup.RetentionPeriod, - BackupMethod: backup.Method, - CronExpression: backup.CronExpression, - } - - // merge cluster backup schedule policy into backup schedule, if the backup - // schedule with specified method already exists, we need to update it - // using the cluster backup schedule policy. 
Otherwise, we need to append - // it to the backup schedule. - for i, s := range backupSchedule.Spec.Schedules { - if s.BackupMethod == backup.Method { - mergeSchedulePolicy(sp, &backupSchedule.Spec.Schedules[i]) - return backupSchedule - } - } - backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, *sp) - return backupSchedule -} - -// getClusterComponentSpec returns the first component name of the componentDefRef. -func (r *BackupPolicyTplTransformer) getClusterComponentSpec() *appsv1alpha1.ClusterComponentSpec { - for _, v := range r.clusterTransformContext.ComponentSpecs { - if v.ComponentDefRef == r.backupPolicy.ComponentDefRef { - return v - } - } - return nil -} - -func (r *BackupPolicyTplTransformer) defaultPolicyAnnotationValue() string { - if r.tplCount > 1 && r.isDefaultTemplate != trueVal { - return "false" - } - return trueVal -} - -func (r *BackupPolicyTplTransformer) buildAnnotations() map[string]string { - annotations := map[string]string{ - dptypes.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), - constant.BackupPolicyTemplateAnnotationKey: r.backupPolicyTpl.Name, - } - if r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] != "" { - annotations[dptypes.ReconfigureRefAnnotationKey] = r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] - } - return annotations -} - -func (r *BackupPolicyTplTransformer) buildLabels() map[string]string { - return map[string]string{ - constant.AppInstanceLabelKey: r.OrigCluster.Name, - constant.KBAppComponentDefRefLabelKey: r.backupPolicy.ComponentDefRef, - constant.AppManagedByLabelKey: constant.AppName, - } -} - -// buildTargetPodLabels builds the target labels for the backup policy that will be -// used to select the target pod. 
-func (r *BackupPolicyTplTransformer) buildTargetPodLabels(comp *appsv1alpha1.ClusterComponentSpec) map[string]string { - labels := map[string]string{ - constant.AppInstanceLabelKey: r.OrigCluster.Name, - constant.KBAppComponentLabelKey: comp.Name, - constant.AppManagedByLabelKey: constant.AppName, - } - // append label to filter specific role of the component. - targetTpl := &r.backupPolicy.Target - if workloadHasRoleLabel(r.compWorkloadType) && - len(targetTpl.Role) > 0 && r.getCompReplicas() > 1 { - // the role only works when the component has multiple replicas. - labels[constant.RoleLabelKey] = targetTpl.Role - } - return labels -} - -// generateBackupPolicyName generates the backup policy name which is created from backup policy template. -func generateBackupPolicyName(clusterName, componentDef, identifier string) string { - if len(identifier) == 0 { - return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) - } - return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) -} - -// generateBackupScheduleName generates the backup schedule name which is created from backup policy template. 
-func generateBackupScheduleName(clusterName, componentDef, identifier string) string { - if len(identifier) == 0 { - return fmt.Sprintf("%s-%s-backup-schedule", clusterName, componentDef) - } - return fmt.Sprintf("%s-%s-backup-schedule-%s", clusterName, componentDef, identifier) -} - -func buildBackupPathPrefix(cluster *appsv1alpha1.Cluster, compName string) string { - return fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, compName) -} - -func workloadHasRoleLabel(workloadType appsv1alpha1.WorkloadType) bool { - return slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) -} - -func mergeSchedulePolicy(src *dpv1alpha1.SchedulePolicy, dst *dpv1alpha1.SchedulePolicy) { - if src.Enabled != nil { - dst.Enabled = src.Enabled - } - if src.RetentionPeriod.String() != "" { - dst.RetentionPeriod = src.RetentionPeriod - } - if src.BackupMethod != "" { - dst.BackupMethod = src.BackupMethod - } - if src.CronExpression != "" { - dst.CronExpression = src.CronExpression - } -} diff --git a/controllers/apps/transformer_component_backup.go b/controllers/apps/transformer_component_backup.go new file mode 100644 index 00000000000..dbe1b912b99 --- /dev/null +++ b/controllers/apps/transformer_component_backup.go @@ -0,0 +1,576 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package apps + +import ( + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/controller/graph" +) + +// componentBackupPolicyTransformer transforms the backup policy template to the data protection backup policy and backup schedule. +type componentBackupPolicyTransformer struct { + *componentTransformContext + + tplCount int + tplIdentifier string + isDefaultTemplate string + + backupPolicyTpl *appsv1alpha1.BackupPolicyTemplate + backupPolicy *appsv1alpha1.BackupPolicy + compWorkloadType appsv1alpha1.WorkloadType +} + +var _ graph.Transformer = &componentBackupPolicyTransformer{} + +func (r *componentBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + return nil +} + +//// Transform transforms the backup policy template to the backup policy and backup schedule. +//func (r *componentBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { +// r.componentTransformContext = ctx.(*componentTransformContext) +// if model.IsObjectDeleting(r.componentTransformContext.ComponentOrig) { +// return nil +// } +// +// graphCli, _ := r.componentTransformContext.Client.(model.GraphClient) +// +// clusterDefName := r.ClusterDef.Name +// backupPolicyTpls := &appsv1alpha1.BackupPolicyTemplateList{} +// if err := r.Client.List(r.Context, backupPolicyTpls, +// client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { +// return err +// } +// r.tplCount = len(backupPolicyTpls.Items) +// if r.tplCount == 0 { +// return nil +// } +// +// backupPolicyNames := map[string]struct{}{} +// backupScheduleNames := map[string]struct{}{} +// for _, tpl := range backupPolicyTpls.Items { +// r.isDefaultTemplate = tpl.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] +// r.tplIdentifier = tpl.Spec.Identifier +// r.backupPolicyTpl = &tpl +// +// for i, bp := range tpl.Spec.BackupPolicies { +// compDef := 
r.ClusterDef.GetComponentDefByName(bp.ComponentDefRef) +// if compDef == nil { +// return intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", +// bp.ComponentDefRef, clusterDefName) +// } +// +// r.backupPolicy = &tpl.Spec.BackupPolicies[i] +// r.compWorkloadType = compDef.WorkloadType +// +// transformBackupPolicy := func() *dpv1alpha1.BackupPolicy { +// // build the data protection backup policy from the template. +// dpBackupPolicy, action := r.transformBackupPolicy() +// if dpBackupPolicy == nil { +// return nil +// } +// +// // if exist multiple backup policy templates and duplicate spec.identifier, +// // the generated backupPolicy may have duplicate names, so it is +// // necessary to check if it already exists. +// if _, ok := backupPolicyNames[dpBackupPolicy.Name]; ok { +// return dpBackupPolicy +// } +// +// switch *action { +// case model.CREATE: +// graphCli.Create(dag, dpBackupPolicy) +// case model.UPDATE: +// graphCli.Update(dag, dpBackupPolicy, dpBackupPolicy) +// } +// backupPolicyNames[dpBackupPolicy.Name] = struct{}{} +// return dpBackupPolicy +// } +// +// transformBackupSchedule := func(backupPolicy *dpv1alpha1.BackupPolicy) { +// // if backup policy is nil, it means that the backup policy template +// // is invalid, backup schedule depends on backup policy, so we do +// // not need to transform backup schedule. +// if backupPolicy == nil { +// return +// } +// +// // only create backup schedule for the default backup policy template +// // if there are more than one backup policy templates. +// if r.isDefaultTemplate != trueVal && r.tplCount > 1 { +// return +// } +// +// // build the data protection backup schedule from the template. +// dpBackupSchedule, action := r.transformBackupSchedule(backupPolicy) +// +// // merge cluster backup configuration into the backup schedule. +// // If the backup schedule is nil, create a new backup schedule +// // based on the cluster backup configuration. 
+// if dpBackupSchedule == nil { +// action = model.ActionCreatePtr() +// } else if action == nil { +// action = model.ActionUpdatePtr() +// } +// +// // for a cluster, the default backup schedule is created by backup +// // policy template, user can also configure cluster backup in the +// // cluster custom object, such as enable cluster backup, set backup +// // schedule, etc. +// // We always prioritize the cluster backup configuration in the +// // cluster object, so we need to merge the cluster backup configuration +// // into the default backup schedule created by backup policy template +// // if it exists. +// dpBackupSchedule = r.mergeClusterBackup(backupPolicy, dpBackupSchedule) +// if dpBackupSchedule == nil { +// return +// } +// +// // if exist multiple backup policy templates and duplicate spec.identifier, +// // the backupPolicy that may be generated may have duplicate names, +// // and it is necessary to check if it already exists. +// if _, ok := backupScheduleNames[dpBackupSchedule.Name]; ok { +// return +// } +// +// switch *action { +// case model.CREATE: +// graphCli.Create(dag, dpBackupSchedule) +// case model.UPDATE: +// graphCli.Update(dag, dpBackupSchedule, dpBackupSchedule) +// } +// graphCli.DependOn(dag, backupPolicy, dpBackupSchedule) +// backupScheduleNames[dpBackupSchedule.Name] = struct{}{} +// } +// +// // transform backup policy template to data protection backupPolicy +// // and backupSchedule +// policy := transformBackupPolicy() +// transformBackupSchedule(policy) +// } +// } +// return nil +//} +// +//// transformBackupPolicy transforms backup policy template to backup policy. 
+//func (r *componentBackupPolicyTransformer) transformBackupPolicy() (*dpv1alpha1.BackupPolicy, *model.Action) { +// cluster := r.OrigCluster +// backupPolicyName := generateBackupPolicyName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) +// backupPolicy := &dpv1alpha1.BackupPolicy{} +// if err := r.Client.Get(r.Context, client.ObjectKey{ +// Namespace: cluster.Namespace, +// Name: backupPolicyName, +// }, backupPolicy); client.IgnoreNotFound(err) != nil { +// return nil, nil +// } +// +// if len(backupPolicy.Name) == 0 { +// // build a new backup policy by the backup policy template. +// return r.buildBackupPolicy(backupPolicyName), model.ActionCreatePtr() +// } +// +// // sync the existing backup policy with the cluster changes +// r.syncBackupPolicy(backupPolicy) +// return backupPolicy, model.ActionUpdatePtr() +//} +// +//func (r *componentBackupPolicyTransformer) transformBackupSchedule( +// backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupSchedule, *model.Action) { +// cluster := r.OrigCluster +// scheduleName := generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) +// backupSchedule := &dpv1alpha1.BackupSchedule{} +// if err := r.Client.Get(r.Context, client.ObjectKey{ +// Namespace: cluster.Namespace, +// Name: scheduleName, +// }, backupSchedule); client.IgnoreNotFound(err) != nil { +// return nil, nil +// } +// +// if len(backupSchedule.Name) == 0 { +// // build a new backup schedule from the backup policy template. 
+// return r.buildBackupSchedule(scheduleName, backupPolicy), model.ActionCreatePtr() +// } +// // sync backup schedule +// r.syncBackupSchedule(backupSchedule) +// return backupSchedule, model.ActionUpdatePtr() +//} +// +//func (r *componentBackupPolicyTransformer) buildBackupSchedule( +// name string, +// backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupSchedule { +// cluster := r.OrigCluster +// backupSchedule := &dpv1alpha1.BackupSchedule{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: name, +// Namespace: cluster.Namespace, +// Labels: r.buildLabels(), +// Annotations: r.buildAnnotations(), +// }, +// Spec: dpv1alpha1.BackupScheduleSpec{ +// BackupPolicyName: backupPolicy.Name, +// }, +// } +// +// var schedules []dpv1alpha1.SchedulePolicy +// for _, s := range r.backupPolicy.Schedules { +// schedules = append(schedules, dpv1alpha1.SchedulePolicy{ +// BackupMethod: s.BackupMethod, +// CronExpression: s.CronExpression, +// Enabled: s.Enabled, +// RetentionPeriod: r.backupPolicy.RetentionPeriod, +// }) +// } +// backupSchedule.Spec.Schedules = schedules +// return backupSchedule +//} +// +//func (r *componentBackupPolicyTransformer) syncBackupSchedule(backupSchedule *dpv1alpha1.BackupSchedule) { +// scheduleMethodMap := map[string]struct{}{} +// for _, s := range backupSchedule.Spec.Schedules { +// scheduleMethodMap[s.BackupMethod] = struct{}{} +// } +// // sync the newly added schedule policies. +// for _, s := range r.backupPolicy.Schedules { +// if _, ok := scheduleMethodMap[s.BackupMethod]; ok { +// continue +// } +// backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, dpv1alpha1.SchedulePolicy{ +// BackupMethod: s.BackupMethod, +// CronExpression: s.CronExpression, +// Enabled: s.Enabled, +// RetentionPeriod: r.backupPolicy.RetentionPeriod, +// }) +// } +//} +// +//// syncBackupPolicy syncs labels and annotations of the backup policy with the cluster changes. 
+//func (r *componentBackupPolicyTransformer) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPolicy) { +// // update labels and annotations of the backup policy. +// if backupPolicy.Annotations == nil { +// backupPolicy.Annotations = map[string]string{} +// } +// if backupPolicy.Labels == nil { +// backupPolicy.Labels = map[string]string{} +// } +// mergeMap(backupPolicy.Annotations, r.buildAnnotations()) +// mergeMap(backupPolicy.Labels, r.buildLabels()) +// +// // update backup repo of the backup policy. +// if r.Cluster.Spec.Backup != nil && r.Cluster.Spec.Backup.RepoName != "" { +// backupPolicy.Spec.BackupRepoName = &r.Cluster.Spec.Backup.RepoName +// } +// +// r.syncBackupMethods(backupPolicy) +// +// // only update the role labelSelector of the backup target instance when +// // component workload is Replication/Consensus. Because the replicas of +// // component will change, such as 2->1. then if the target role is 'follower' +// // and replicas is 1, the target instance can not be found. so we sync the +// // label selector automatically. +// if !workloadHasRoleLabel(r.compWorkloadType) { +// return +// } +// +// comp := r.getClusterComponentSpec() +// if comp == nil { +// return +// } +// +// // convert role labelSelector based on the replicas of the component automatically. +// // TODO(ldm): need more review. 
+// role := r.backupPolicy.Target.Role +// if len(role) == 0 { +// return +// } +// +// podSelector := backupPolicy.Spec.Target.PodSelector +// if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil { +// podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} +// } +// if r.getCompReplicas() == 1 { +// delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey) +// } else { +// podSelector.LabelSelector.MatchLabels[constant.RoleLabelKey] = role +// } +//} +// +//func (r *componentBackupPolicyTransformer) getCompReplicas() int32 { +// rsm := &workloads.ReplicatedStateMachine{} +// compSpec := r.getClusterComponentSpec() +// rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) +// if err := r.Client.Get(r.Context, client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { +// return compSpec.Replicas +// } +// return *rsm.Spec.Replicas +//} +// +//// buildBackupPolicy builds a new backup policy by the backup policy template. +//func (r *componentBackupPolicyTransformer) buildBackupPolicy(backupPolicyName string) *dpv1alpha1.BackupPolicy { +// comp := r.getClusterComponentSpec() +// if comp == nil { +// return nil +// } +// +// cluster := r.OrigCluster +// backupPolicy := &dpv1alpha1.BackupPolicy{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: backupPolicyName, +// Namespace: cluster.Namespace, +// Labels: r.buildLabels(), +// Annotations: r.buildAnnotations(), +// }, +// } +// r.syncBackupMethods(backupPolicy) +// bpSpec := backupPolicy.Spec +// // if cluster have backup repo, set backup repo name to backup policy. 
+// if cluster.Spec.Backup != nil && cluster.Spec.Backup.RepoName != "" { +// bpSpec.BackupRepoName = &cluster.Spec.Backup.RepoName +// } +// bpSpec.PathPrefix = buildBackupPathPrefix(cluster, comp.Name) +// bpSpec.Target = r.buildBackupTarget(comp) +// backupPolicy.Spec = bpSpec +// return backupPolicy +//} +// +//// syncBackupMethods syncs the backupMethod of tpl to backupPolicy. +//func (r *componentBackupPolicyTransformer) syncBackupMethods(backupPolicy *dpv1alpha1.BackupPolicy) { +// var backupMethods []dpv1alpha1.BackupMethod +// for _, v := range r.backupPolicy.BackupMethods { +// mappingEnv := r.doEnvMapping(v.EnvMapping) +// v.BackupMethod.Env = dputils.MergeEnv(v.BackupMethod.Env, mappingEnv) +// backupMethods = append(backupMethods, v.BackupMethod) +// } +// backupPolicy.Spec.BackupMethods = backupMethods +//} +// +//func (r *componentBackupPolicyTransformer) doEnvMapping(envMapping []appsv1alpha1.EnvMappingVar) []corev1.EnvVar { +// var env []corev1.EnvVar +// for _, v := range envMapping { +// for _, cv := range v.ValueFrom.ClusterVersionRef { +// if !slices.Contains(cv.Names, r.Cluster.Spec.ClusterVersionRef) { +// continue +// } +// env = append(env, corev1.EnvVar{ +// Name: v.Key, +// Value: cv.MappingValue, +// }) +// } +// } +// return env +//} +// +//func (r *componentBackupPolicyTransformer) buildBackupTarget( +// comp *appsv1alpha1.ClusterComponentSpec) *dpv1alpha1.BackupTarget { +// targetTpl := r.backupPolicy.Target +// clusterName := r.OrigCluster.Name +// +// getSAName := func() string { +// if comp.ServiceAccountName != "" { +// return comp.ServiceAccountName +// } +// return "kb-" + r.Cluster.Name +// } +// +// // build the target connection credential +// cc := dpv1alpha1.ConnectionCredential{} +// if len(targetTpl.Account) > 0 { +// cc.SecretName = fmt.Sprintf("%s-%s-%s", clusterName, comp.Name, targetTpl.Account) +// cc.PasswordKey = constant.AccountPasswdForSecret +// cc.PasswordKey = constant.AccountNameForSecret +// } else { +// 
cc.SecretName = constant.GenerateDefaultConnCredential(clusterName) +// ccKey := targetTpl.ConnectionCredentialKey +// if ccKey.PasswordKey != nil { +// cc.PasswordKey = *ccKey.PasswordKey +// } +// if ccKey.UsernameKey != nil { +// cc.UsernameKey = *ccKey.UsernameKey +// } +// if ccKey.PortKey != nil { +// cc.PortKey = *ccKey.PortKey +// } +// if ccKey.HostKey != nil { +// cc.HostKey = *ccKey.HostKey +// } +// } +// +// target := &dpv1alpha1.BackupTarget{ +// PodSelector: &dpv1alpha1.PodSelector{ +// Strategy: dpv1alpha1.PodSelectionStrategyAny, +// LabelSelector: &metav1.LabelSelector{ +// MatchLabels: r.buildTargetPodLabels(comp), +// }, +// }, +// ConnectionCredential: &cc, +// ServiceAccountName: getSAName(), +// } +// return target +//} +// +//func (r *componentBackupPolicyTransformer) mergeClusterBackup( +// backupPolicy *dpv1alpha1.BackupPolicy, +// backupSchedule *dpv1alpha1.BackupSchedule) *dpv1alpha1.BackupSchedule { +// cluster := r.OrigCluster +// backupEnabled := func() bool { +// return cluster.Spec.Backup != nil && boolValue(cluster.Spec.Backup.Enabled) +// } +// +// if backupPolicy == nil || cluster.Spec.Backup == nil { +// // backup policy is nil, can not enable cluster backup, so record event and return. +// if backupEnabled() { +// r.EventRecorder.Event(r.Cluster, corev1.EventTypeWarning, +// "BackupPolicyNotFound", "backup policy is nil, can not enable cluster backup") +// } +// return backupSchedule +// } +// +// backup := cluster.Spec.Backup +// // there is no backup schedule created by backup policy template, so we need to +// // create a new backup schedule for cluster backup. 
+// if backupSchedule == nil { +// backupSchedule = &dpv1alpha1.BackupSchedule{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier), +// Namespace: cluster.Namespace, +// Labels: r.buildLabels(), +// Annotations: r.buildAnnotations(), +// }, +// Spec: dpv1alpha1.BackupScheduleSpec{ +// BackupPolicyName: backupPolicy.Name, +// StartingDeadlineMinutes: backup.StartingDeadlineMinutes, +// Schedules: []dpv1alpha1.SchedulePolicy{}, +// }, +// } +// } +// +// // build backup schedule policy by cluster backup spec +// sp := &dpv1alpha1.SchedulePolicy{ +// Enabled: backup.Enabled, +// RetentionPeriod: backup.RetentionPeriod, +// BackupMethod: backup.Method, +// CronExpression: backup.CronExpression, +// } +// +// // merge cluster backup schedule policy into backup schedule, if the backup +// // schedule with specified method already exists, we need to update it +// // using the cluster backup schedule policy. Otherwise, we need to append +// // it to the backup schedule. +// for i, s := range backupSchedule.Spec.Schedules { +// if s.BackupMethod == backup.Method { +// mergeSchedulePolicy(sp, &backupSchedule.Spec.Schedules[i]) +// return backupSchedule +// } +// } +// backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, *sp) +// return backupSchedule +//} +// +//// getClusterComponentSpec returns the first component name of the componentDefRef. 
+//func (r *componentBackupPolicyTransformer) getClusterComponentSpec() *appsv1alpha1.ClusterComponentSpec { +// for _, v := range r.componentTransformContext.ComponentSpecs { +// if v.ComponentDefRef == r.backupPolicy.ComponentDefRef { +// return v +// } +// } +// return nil +//} +// +//func (r *componentBackupPolicyTransformer) defaultPolicyAnnotationValue() string { +// if r.tplCount > 1 && r.isDefaultTemplate != trueVal { +// return "false" +// } +// return trueVal +//} +// +//func (r *componentBackupPolicyTransformer) buildAnnotations() map[string]string { +// annotations := map[string]string{ +// dptypes.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), +// constant.BackupPolicyTemplateAnnotationKey: r.backupPolicyTpl.Name, +// } +// if r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] != "" { +// annotations[dptypes.ReconfigureRefAnnotationKey] = r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] +// } +// return annotations +//} +// +//func (r *componentBackupPolicyTransformer) buildLabels() map[string]string { +// return map[string]string{ +// constant.AppInstanceLabelKey: r.OrigCluster.Name, +// constant.KBAppComponentDefRefLabelKey: r.backupPolicy.ComponentDefRef, +// constant.AppManagedByLabelKey: constant.AppName, +// } +//} +// +//// buildTargetPodLabels builds the target labels for the backup policy that will be +//// used to select the target pod. +//func (r *componentBackupPolicyTransformer) buildTargetPodLabels(comp *appsv1alpha1.ClusterComponentSpec) map[string]string { +// labels := map[string]string{ +// constant.AppInstanceLabelKey: r.OrigCluster.Name, +// constant.KBAppComponentLabelKey: comp.Name, +// constant.AppManagedByLabelKey: constant.AppName, +// } +// // append label to filter specific role of the component. 
+// targetTpl := &r.backupPolicy.Target +// if workloadHasRoleLabel(r.compWorkloadType) && +// len(targetTpl.Role) > 0 && r.getCompReplicas() > 1 { +// // the role only works when the component has multiple replicas. +// labels[constant.RoleLabelKey] = targetTpl.Role +// } +// return labels +//} +// +//// generateBackupPolicyName generates the backup policy name which is created from backup policy template. +//func generateBackupPolicyName(clusterName, componentDef, identifier string) string { +// if len(identifier) == 0 { +// return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) +// } +// return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) +//} +// +//// generateBackupScheduleName generates the backup schedule name which is created from backup policy template. +//func generateBackupScheduleName(clusterName, componentDef, identifier string) string { +// if len(identifier) == 0 { +// return fmt.Sprintf("%s-%s-backup-schedule", clusterName, componentDef) +// } +// return fmt.Sprintf("%s-%s-backup-schedule-%s", clusterName, componentDef, identifier) +//} +// +//func buildBackupPathPrefix(cluster *appsv1alpha1.Cluster, compName string) string { +// return fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, compName) +//} +// +//func workloadHasRoleLabel(workloadType appsv1alpha1.WorkloadType) bool { +// return slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) +//} +// +//func mergeSchedulePolicy(src *dpv1alpha1.SchedulePolicy, dst *dpv1alpha1.SchedulePolicy) { +// if src.Enabled != nil { +// dst.Enabled = src.Enabled +// } +// if src.RetentionPeriod.String() != "" { +// dst.RetentionPeriod = src.RetentionPeriod +// } +// if src.BackupMethod != "" { +// dst.BackupMethod = src.BackupMethod +// } +// if src.CronExpression != "" { +// dst.CronExpression = src.CronExpression +// } +//} diff --git a/controllers/apps/transformer_component_backup_policy.go 
b/controllers/apps/transformer_component_backup_policy.go deleted file mode 100644 index 4e1b8621fee..00000000000 --- a/controllers/apps/transformer_component_backup_policy.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package apps - -import ( - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" -) - -// componentBackupPolicyTransformer handles the component PDB -type componentBackupPolicyTransformer struct{} - -var _ graph.Transformer = &componentBackupPolicyTransformer{} - -func (t *componentBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - cctx, _ := ctx.(*componentTransformContext) - if model.IsObjectDeleting(cctx.ComponentOrig) { - return nil - } - // TODO(component) - return nil -} diff --git a/controllers/apps/transformer_component_load_resources.go b/controllers/apps/transformer_component_load_resources.go index 5ade116b4e8..b8ad4545585 100644 --- a/controllers/apps/transformer_component_load_resources.go +++ b/controllers/apps/transformer_component_load_resources.go @@ -23,6 +23,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" 
"github.com/apecloud/kubeblocks/pkg/controller/component" @@ -31,7 +32,9 @@ import ( ) // componentLoadResourcesTransformer handles referenced resources validation and load them into context -type componentLoadResourcesTransformer struct{} +type componentLoadResourcesTransformer struct { + client.Client +} var _ graph.Transformer = &componentLoadResourcesTransformer{} @@ -49,34 +52,75 @@ func (t *componentLoadResourcesTransformer) Transform(ctx graph.TransformContext setProvisioningStartedCondition(&comp.Status.Conditions, comp.Name, comp.Generation, err) }() - // get and init component definition context - compDef := &appsv1alpha1.ComponentDefinition{} - err = transCtx.Client.Get(transCtx.Context, types.NamespacedName{Name: comp.Spec.CompDef}, compDef) + // TODO(xingran): In order to backward compatibility in KubeBlocks version 0.8.0, the cluster field is still required. However, if in the future the Component objects can be used independently, the Cluster field should be removed from the component.Spec + cluster := &appsv1alpha1.Cluster{} + err = transCtx.Client.Get(transCtx.Context, types.NamespacedName{Name: comp.Spec.Cluster, Namespace: comp.Namespace}, cluster) if err != nil { return newRequeueError(requeueDuration, err.Error()) } + compDef, err := t.getOrBuildCompDef(reqCtx, transCtx, cluster) + if err != nil { + return newRequeueError(requeueDuration, err.Error()) + } if compDef.Status.Phase != appsv1alpha1.AvailablePhase { message := fmt.Sprintf("ComponentDefinition referenced is unavailable: %s", compDef.Name) return newRequeueError(requeueDuration, message) } - // get and init cluster context - // TODO(xingran): In order to backward compatibility in KubeBlocks version 0.8.0, the cluster field is still required. 
However, if in the future the Component objects can be used independently, the Cluster field should be removed from the component.Spec - cluster := &appsv1alpha1.Cluster{} - err = transCtx.Client.Get(transCtx.Context, types.NamespacedName{Name: comp.Spec.Cluster, Namespace: comp.Namespace}, cluster) - if err != nil { - return newRequeueError(requeueDuration, err.Error()) - } - transCtx.CompDef = compDef transCtx.Cluster = cluster synthesizeComp, err := component.BuildSynthesizedComponent(reqCtx, transCtx.Client, compDef, cluster, comp) if err != nil { - message := fmt.Sprintf("Component %s BuildSynthesizedComponent failed: %s", comp.Name, err.Error()) + message := fmt.Sprintf("build synthesized component for %s failed: %s", comp.Name, err.Error()) return newRequeueError(requeueDuration, message) } transCtx.SynthesizeComponent = synthesizeComp return nil } + +func (t *componentLoadResourcesTransformer) getOrBuildCompDef(reqCtx ictrlutil.RequestCtx, + transCtx *componentTransformContext, cluster *appsv1alpha1.Cluster) (*appsv1alpha1.ComponentDefinition, error) { + clusterCompSpec, err := t.isLegacyComponent(cluster, transCtx.Component) + if err != nil { + return nil, err + } + var compDef *appsv1alpha1.ComponentDefinition + if clusterCompSpec != nil { + compDef, err = component.BuildComponentDefinition(reqCtx, t.Client, cluster, clusterCompSpec) + if err != nil { + return nil, err + } + } else { + compDef = &appsv1alpha1.ComponentDefinition{} + err = transCtx.Client.Get(transCtx.Context, types.NamespacedName{Name: transCtx.Component.Spec.CompDef}, compDef) + if err != nil { + return nil, err + } + } + return compDef, nil +} + +func (t *componentLoadResourcesTransformer) isLegacyComponent(cluster *appsv1alpha1.Cluster, + comp *appsv1alpha1.Component) (*appsv1alpha1.ClusterComponentSpec, error) { + compName, err := component.ShortName(cluster.Name, comp.Name) + if err != nil { + return nil, err + } + var targetCompSpec *appsv1alpha1.ClusterComponentSpec + for i, 
compSpec := range cluster.Spec.ComponentSpecs { + if compSpec.Name == compName { + if len(compSpec.ComponentDef) > 0 { + if compSpec.ComponentDef == comp.Spec.CompDef { + return nil, nil + } + return nil, fmt.Errorf("runtime error - comp definitions referred in cluster and component are different: %s vs %s", + compSpec.ComponentDef, comp.Spec.CompDef) + } + targetCompSpec = &cluster.Spec.ComponentSpecs[i] + break + } + } + return targetCompSpec, nil +} diff --git a/controllers/apps/transformer_component_restore.go b/controllers/apps/transformer_component_restore.go deleted file mode 100644 index 47696536dd7..00000000000 --- a/controllers/apps/transformer_component_restore.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see .
-*/ - -package apps - -import ( - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" -) - -// componentRestoreTransformer handles the component PDB -type componentRestoreTransformer struct{} - -var _ graph.Transformer = &componentRestoreTransformer{} - -func (t *componentRestoreTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - cctx, _ := ctx.(*componentTransformContext) - if model.IsObjectDeleting(cctx.ComponentOrig) { - return nil - } - // TODO(component) - return nil -} From d95295a2f2b97e8f5ddc83d1ba0d936c13fc2ff5 Mon Sep 17 00:00:00 2001 From: Leon Date: Mon, 30 Oct 2023 15:59:14 +0800 Subject: [PATCH 2/2] move backup policy template to cluster controller --- controllers/apps/cluster_controller.go | 2 + controllers/apps/component_controller.go | 2 - .../apps/transformer_cluster_backup_policy.go | 586 ++++++++++++++++++ .../apps/transformer_component_backup.go | 576 ----------------- 4 files changed, 588 insertions(+), 578 deletions(-) create mode 100644 controllers/apps/transformer_cluster_backup_policy.go delete mode 100644 controllers/apps/transformer_component_backup.go diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index ec06d9c5c57..ab498a618cf 100644 --- a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -144,6 +144,8 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct &ClusterRestoreTransformer{Client: r.Client}, // create all cluster components objects &ClusterComponentTransformer{Client: r.Client}, + // build backuppolicy and backupschedule from backupPolicyTemplate + &clusterBackupPolicyTransformer{}, // add our finalizer to all objects &ClusterOwnershipTransformer{}, // make all workload objects depending on credential secret diff --git a/controllers/apps/component_controller.go b/controllers/apps/component_controller.go index 27a85929550..840bae3667b 
100644 --- a/controllers/apps/component_controller.go +++ b/controllers/apps/component_controller.go @@ -142,8 +142,6 @@ func (r *ComponentReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( &componentConfigurationTransformer{Client: r.Client}, // handle the component workload &componentWorkloadTransformer{Client: r.Client}, - // generate backuppolicy and backupschedule from backupPolicyTemplate - &componentBackupPolicyTransformer{}, // handle RBAC for component workloads &componentRBACTransformer{}, // add our finalizer to all objects diff --git a/controllers/apps/transformer_cluster_backup_policy.go b/controllers/apps/transformer_cluster_backup_policy.go new file mode 100644 index 00000000000..909710532d1 --- /dev/null +++ b/controllers/apps/transformer_cluster_backup_policy.go @@ -0,0 +1,586 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package apps + +import ( + "fmt" + + "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/model" + intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + dptypes "github.com/apecloud/kubeblocks/pkg/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/pkg/dataprotection/utils" +) + +// clusterBackupPolicyTransformer transforms the backup policy template to the data protection backup policy and backup schedule. +type clusterBackupPolicyTransformer struct { + *clusterTransformContext + + tplCount int + tplIdentifier string + isDefaultTemplate string + + backupPolicyTpl *appsv1alpha1.BackupPolicyTemplate + backupPolicy *appsv1alpha1.BackupPolicy + compWorkloadType appsv1alpha1.WorkloadType +} + +var _ graph.Transformer = &clusterBackupPolicyTransformer{} + +// Transform transforms the backup policy template to the backup policy and backup schedule. 
+func (r *clusterBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + r.clusterTransformContext = ctx.(*clusterTransformContext) + if model.IsObjectDeleting(r.clusterTransformContext.OrigCluster) { + return nil + } + + graphCli, _ := r.clusterTransformContext.Client.(model.GraphClient) + + clusterDefName := r.ClusterDef.Name + backupPolicyTpls := &appsv1alpha1.BackupPolicyTemplateList{} + if err := r.Client.List(r.Context, backupPolicyTpls, + client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { + return err + } + r.tplCount = len(backupPolicyTpls.Items) + if r.tplCount == 0 { + return nil + } + + backupPolicyNames := map[string]struct{}{} + backupScheduleNames := map[string]struct{}{} + for _, tpl := range backupPolicyTpls.Items { + r.isDefaultTemplate = tpl.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] + r.tplIdentifier = tpl.Spec.Identifier + r.backupPolicyTpl = &tpl + + for i, bp := range tpl.Spec.BackupPolicies { + compDef := r.ClusterDef.GetComponentDefByName(bp.ComponentDefRef) + if compDef == nil { + return intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", + bp.ComponentDefRef, clusterDefName) + } + + r.backupPolicy = &tpl.Spec.BackupPolicies[i] + r.compWorkloadType = compDef.WorkloadType + + transformBackupPolicy := func() *dpv1alpha1.BackupPolicy { + // build the data protection backup policy from the template. + dpBackupPolicy, action := r.transformBackupPolicy() + if dpBackupPolicy == nil { + return nil + } + + // if exist multiple backup policy templates and duplicate spec.identifier, + // the generated backupPolicy may have duplicate names, so it is + // necessary to check if it already exists. 
+ if _, ok := backupPolicyNames[dpBackupPolicy.Name]; ok { + return dpBackupPolicy + } + + switch *action { + case model.CREATE: + graphCli.Create(dag, dpBackupPolicy) + case model.UPDATE: + graphCli.Update(dag, dpBackupPolicy, dpBackupPolicy) + } + backupPolicyNames[dpBackupPolicy.Name] = struct{}{} + return dpBackupPolicy + } + + transformBackupSchedule := func(backupPolicy *dpv1alpha1.BackupPolicy) { + // if backup policy is nil, it means that the backup policy template + // is invalid, backup schedule depends on backup policy, so we do + // not need to transform backup schedule. + if backupPolicy == nil { + return + } + + // only create backup schedule for the default backup policy template + // if there are more than one backup policy templates. + if r.isDefaultTemplate != trueVal && r.tplCount > 1 { + return + } + + // build the data protection backup schedule from the template. + dpBackupSchedule, action := r.transformBackupSchedule(backupPolicy) + + // merge cluster backup configuration into the backup schedule. + // If the backup schedule is nil, create a new backup schedule + // based on the cluster backup configuration. + if dpBackupSchedule == nil { + action = model.ActionCreatePtr() + } else if action == nil { + action = model.ActionUpdatePtr() + } + + // for a cluster, the default backup schedule is created by backup + // policy template, user can also configure cluster backup in the + // cluster custom object, such as enable cluster backup, set backup + // schedule, etc. + // We always prioritize the cluster backup configuration in the + // cluster object, so we need to merge the cluster backup configuration + // into the default backup schedule created by backup policy template + // if it exists. 
+ dpBackupSchedule = r.mergeClusterBackup(backupPolicy, dpBackupSchedule) + if dpBackupSchedule == nil { + return + } + + // if exist multiple backup policy templates and duplicate spec.identifier, + // the backupPolicy that may be generated may have duplicate names, + // and it is necessary to check if it already exists. + if _, ok := backupScheduleNames[dpBackupSchedule.Name]; ok { + return + } + + switch *action { + case model.CREATE: + graphCli.Create(dag, dpBackupSchedule) + case model.UPDATE: + graphCli.Update(dag, dpBackupSchedule, dpBackupSchedule) + } + graphCli.DependOn(dag, backupPolicy, dpBackupSchedule) + backupScheduleNames[dpBackupSchedule.Name] = struct{}{} + } + + // transform backup policy template to data protection backupPolicy + // and backupSchedule + policy := transformBackupPolicy() + transformBackupSchedule(policy) + } + } + return nil +} + +// transformBackupPolicy transforms backup policy template to backup policy. +func (r *clusterBackupPolicyTransformer) transformBackupPolicy() (*dpv1alpha1.BackupPolicy, *model.Action) { + cluster := r.OrigCluster + backupPolicyName := generateBackupPolicyName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) + backupPolicy := &dpv1alpha1.BackupPolicy{} + if err := r.Client.Get(r.Context, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: backupPolicyName, + }, backupPolicy); client.IgnoreNotFound(err) != nil { + return nil, nil + } + + if len(backupPolicy.Name) == 0 { + // build a new backup policy by the backup policy template. 
+ return r.buildBackupPolicy(backupPolicyName), model.ActionCreatePtr() + } + + // sync the existing backup policy with the cluster changes + r.syncBackupPolicy(backupPolicy) + return backupPolicy, model.ActionUpdatePtr() +} + +func (r *clusterBackupPolicyTransformer) transformBackupSchedule( + backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupSchedule, *model.Action) { + cluster := r.OrigCluster + scheduleName := generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) + backupSchedule := &dpv1alpha1.BackupSchedule{} + if err := r.Client.Get(r.Context, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: scheduleName, + }, backupSchedule); client.IgnoreNotFound(err) != nil { + return nil, nil + } + + if len(backupSchedule.Name) == 0 { + // build a new backup schedule from the backup policy template. + return r.buildBackupSchedule(scheduleName, backupPolicy), model.ActionCreatePtr() + } + // sync backup schedule + r.syncBackupSchedule(backupSchedule) + return backupSchedule, model.ActionUpdatePtr() +} + +func (r *clusterBackupPolicyTransformer) buildBackupSchedule( + name string, + backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupSchedule { + cluster := r.OrigCluster + backupSchedule := &dpv1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), + }, + Spec: dpv1alpha1.BackupScheduleSpec{ + BackupPolicyName: backupPolicy.Name, + }, + } + + var schedules []dpv1alpha1.SchedulePolicy + for _, s := range r.backupPolicy.Schedules { + schedules = append(schedules, dpv1alpha1.SchedulePolicy{ + BackupMethod: s.BackupMethod, + CronExpression: s.CronExpression, + Enabled: s.Enabled, + RetentionPeriod: r.backupPolicy.RetentionPeriod, + }) + } + backupSchedule.Spec.Schedules = schedules + return backupSchedule +} + +func (r *clusterBackupPolicyTransformer) syncBackupSchedule(backupSchedule 
*dpv1alpha1.BackupSchedule) { + scheduleMethodMap := map[string]struct{}{} + for _, s := range backupSchedule.Spec.Schedules { + scheduleMethodMap[s.BackupMethod] = struct{}{} + } + // sync the newly added schedule policies. + for _, s := range r.backupPolicy.Schedules { + if _, ok := scheduleMethodMap[s.BackupMethod]; ok { + continue + } + backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, dpv1alpha1.SchedulePolicy{ + BackupMethod: s.BackupMethod, + CronExpression: s.CronExpression, + Enabled: s.Enabled, + RetentionPeriod: r.backupPolicy.RetentionPeriod, + }) + } +} + +// syncBackupPolicy syncs labels and annotations of the backup policy with the cluster changes. +func (r *clusterBackupPolicyTransformer) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPolicy) { + // update labels and annotations of the backup policy. + if backupPolicy.Annotations == nil { + backupPolicy.Annotations = map[string]string{} + } + if backupPolicy.Labels == nil { + backupPolicy.Labels = map[string]string{} + } + mergeMap(backupPolicy.Annotations, r.buildAnnotations()) + mergeMap(backupPolicy.Labels, r.buildLabels()) + + // update backup repo of the backup policy. + if r.Cluster.Spec.Backup != nil && r.Cluster.Spec.Backup.RepoName != "" { + backupPolicy.Spec.BackupRepoName = &r.Cluster.Spec.Backup.RepoName + } + + r.syncBackupMethods(backupPolicy) + + // only update the role labelSelector of the backup target instance when + // component workload is Replication/Consensus. Because the replicas of + // component will change, such as 2->1. then if the target role is 'follower' + // and replicas is 1, the target instance can not be found. so we sync the + // label selector automatically. + if !workloadHasRoleLabel(r.compWorkloadType) { + return + } + + comp := r.getClusterComponentSpec() + if comp == nil { + return + } + + // convert role labelSelector based on the replicas of the component automatically. + // TODO(ldm): need more review. 
+ role := r.backupPolicy.Target.Role + if len(role) == 0 { + return + } + + podSelector := backupPolicy.Spec.Target.PodSelector + if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil { + podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} + } + if r.getCompReplicas() == 1 { + delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey) + } else { + podSelector.LabelSelector.MatchLabels[constant.RoleLabelKey] = role + } +} + +func (r *clusterBackupPolicyTransformer) getCompReplicas() int32 { + rsm := &workloads.ReplicatedStateMachine{} + compSpec := r.getClusterComponentSpec() + rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) + if err := r.Client.Get(r.Context, client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { + return compSpec.Replicas + } + return *rsm.Spec.Replicas +} + +// buildBackupPolicy builds a new backup policy by the backup policy template. +func (r *clusterBackupPolicyTransformer) buildBackupPolicy(backupPolicyName string) *dpv1alpha1.BackupPolicy { + comp := r.getClusterComponentSpec() + if comp == nil { + return nil + } + + cluster := r.OrigCluster + backupPolicy := &dpv1alpha1.BackupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupPolicyName, + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), + }, + } + r.syncBackupMethods(backupPolicy) + bpSpec := backupPolicy.Spec + // if cluster have backup repo, set backup repo name to backup policy. + if cluster.Spec.Backup != nil && cluster.Spec.Backup.RepoName != "" { + bpSpec.BackupRepoName = &cluster.Spec.Backup.RepoName + } + bpSpec.PathPrefix = buildBackupPathPrefix(cluster, comp.Name) + bpSpec.Target = r.buildBackupTarget(comp) + backupPolicy.Spec = bpSpec + return backupPolicy +} + +// syncBackupMethods syncs the backupMethod of tpl to backupPolicy. 
+func (r *clusterBackupPolicyTransformer) syncBackupMethods(backupPolicy *dpv1alpha1.BackupPolicy) { + var backupMethods []dpv1alpha1.BackupMethod + for _, v := range r.backupPolicy.BackupMethods { + mappingEnv := r.doEnvMapping(v.EnvMapping) + v.BackupMethod.Env = dputils.MergeEnv(v.BackupMethod.Env, mappingEnv) + backupMethods = append(backupMethods, v.BackupMethod) + } + backupPolicy.Spec.BackupMethods = backupMethods +} + +func (r *clusterBackupPolicyTransformer) doEnvMapping(envMapping []appsv1alpha1.EnvMappingVar) []corev1.EnvVar { + var env []corev1.EnvVar + for _, v := range envMapping { + for _, cv := range v.ValueFrom.ClusterVersionRef { + if !slices.Contains(cv.Names, r.Cluster.Spec.ClusterVersionRef) { + continue + } + env = append(env, corev1.EnvVar{ + Name: v.Key, + Value: cv.MappingValue, + }) + } + } + return env +} + +func (r *clusterBackupPolicyTransformer) buildBackupTarget( + comp *appsv1alpha1.ClusterComponentSpec) *dpv1alpha1.BackupTarget { + targetTpl := r.backupPolicy.Target + clusterName := r.OrigCluster.Name + + getSAName := func() string { + if comp.ServiceAccountName != "" { + return comp.ServiceAccountName + } + return "kb-" + r.Cluster.Name + } + + // build the target connection credential + cc := dpv1alpha1.ConnectionCredential{} + if len(targetTpl.Account) > 0 { + cc.SecretName = fmt.Sprintf("%s-%s-%s", clusterName, comp.Name, targetTpl.Account) + cc.PasswordKey = constant.AccountPasswdForSecret + cc.UsernameKey = constant.AccountNameForSecret + } else { + cc.SecretName = constant.GenerateDefaultConnCredential(clusterName) + ccKey := targetTpl.ConnectionCredentialKey + if ccKey.PasswordKey != nil { + cc.PasswordKey = *ccKey.PasswordKey + } + if ccKey.UsernameKey != nil { + cc.UsernameKey = *ccKey.UsernameKey + } + if ccKey.PortKey != nil { + cc.PortKey = *ccKey.PortKey + } + if ccKey.HostKey != nil { + cc.HostKey = *ccKey.HostKey + } + } + + target := &dpv1alpha1.BackupTarget{ + PodSelector: &dpv1alpha1.PodSelector{ + Strategy: 
dpv1alpha1.PodSelectionStrategyAny, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: r.buildTargetPodLabels(comp), + }, + }, + ConnectionCredential: &cc, + ServiceAccountName: getSAName(), + } + return target +} + +func (r *clusterBackupPolicyTransformer) mergeClusterBackup( + backupPolicy *dpv1alpha1.BackupPolicy, + backupSchedule *dpv1alpha1.BackupSchedule) *dpv1alpha1.BackupSchedule { + cluster := r.OrigCluster + backupEnabled := func() bool { + return cluster.Spec.Backup != nil && boolValue(cluster.Spec.Backup.Enabled) + } + + if backupPolicy == nil || cluster.Spec.Backup == nil { + // backup policy is nil, can not enable cluster backup, so record event and return. + if backupEnabled() { + r.EventRecorder.Event(r.Cluster, corev1.EventTypeWarning, + "BackupPolicyNotFound", "backup policy is nil, can not enable cluster backup") + } + return backupSchedule + } + + backup := cluster.Spec.Backup + // there is no backup schedule created by backup policy template, so we need to + // create a new backup schedule for cluster backup. + if backupSchedule == nil { + backupSchedule = &dpv1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier), + Namespace: cluster.Namespace, + Labels: r.buildLabels(), + Annotations: r.buildAnnotations(), + }, + Spec: dpv1alpha1.BackupScheduleSpec{ + BackupPolicyName: backupPolicy.Name, + StartingDeadlineMinutes: backup.StartingDeadlineMinutes, + Schedules: []dpv1alpha1.SchedulePolicy{}, + }, + } + } + + // build backup schedule policy by cluster backup spec + sp := &dpv1alpha1.SchedulePolicy{ + Enabled: backup.Enabled, + RetentionPeriod: backup.RetentionPeriod, + BackupMethod: backup.Method, + CronExpression: backup.CronExpression, + } + + // merge cluster backup schedule policy into backup schedule, if the backup + // schedule with specified method already exists, we need to update it + // using the cluster backup schedule policy. 
Otherwise, we need to append + // it to the backup schedule. + for i, s := range backupSchedule.Spec.Schedules { + if s.BackupMethod == backup.Method { + mergeSchedulePolicy(sp, &backupSchedule.Spec.Schedules[i]) + return backupSchedule + } + } + backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, *sp) + return backupSchedule +} + +// getClusterComponentSpec returns the first component name of the componentDefRef. +func (r *clusterBackupPolicyTransformer) getClusterComponentSpec() *appsv1alpha1.ClusterComponentSpec { + for _, v := range r.clusterTransformContext.ComponentSpecs { + if v.ComponentDefRef == r.backupPolicy.ComponentDefRef { + return v + } + } + return nil +} + +func (r *clusterBackupPolicyTransformer) defaultPolicyAnnotationValue() string { + if r.tplCount > 1 && r.isDefaultTemplate != trueVal { + return "false" + } + return trueVal +} + +func (r *clusterBackupPolicyTransformer) buildAnnotations() map[string]string { + annotations := map[string]string{ + dptypes.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), + constant.BackupPolicyTemplateAnnotationKey: r.backupPolicyTpl.Name, + } + if r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] != "" { + annotations[dptypes.ReconfigureRefAnnotationKey] = r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] + } + return annotations +} + +func (r *clusterBackupPolicyTransformer) buildLabels() map[string]string { + return map[string]string{ + constant.AppInstanceLabelKey: r.OrigCluster.Name, + constant.KBAppComponentDefRefLabelKey: r.backupPolicy.ComponentDefRef, + constant.AppManagedByLabelKey: constant.AppName, + } +} + +// buildTargetPodLabels builds the target labels for the backup policy that will be +// used to select the target pod. 
+func (r *clusterBackupPolicyTransformer) buildTargetPodLabels(comp *appsv1alpha1.ClusterComponentSpec) map[string]string { + labels := map[string]string{ + constant.AppInstanceLabelKey: r.OrigCluster.Name, + constant.KBAppComponentLabelKey: comp.Name, + constant.AppManagedByLabelKey: constant.AppName, + } + // append label to filter specific role of the component. + targetTpl := &r.backupPolicy.Target + if workloadHasRoleLabel(r.compWorkloadType) && + len(targetTpl.Role) > 0 && r.getCompReplicas() > 1 { + // the role only works when the component has multiple replicas. + labels[constant.RoleLabelKey] = targetTpl.Role + } + return labels +} + +// generateBackupPolicyName generates the backup policy name which is created from backup policy template. +func generateBackupPolicyName(clusterName, componentDef, identifier string) string { + if len(identifier) == 0 { + return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) + } + return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) +} + +// generateBackupScheduleName generates the backup schedule name which is created from backup policy template. 
+func generateBackupScheduleName(clusterName, componentDef, identifier string) string { + if len(identifier) == 0 { + return fmt.Sprintf("%s-%s-backup-schedule", clusterName, componentDef) + } + return fmt.Sprintf("%s-%s-backup-schedule-%s", clusterName, componentDef, identifier) +} + +func buildBackupPathPrefix(cluster *appsv1alpha1.Cluster, compName string) string { + return fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, compName) +} + +func workloadHasRoleLabel(workloadType appsv1alpha1.WorkloadType) bool { + return slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) +} + +func mergeSchedulePolicy(src *dpv1alpha1.SchedulePolicy, dst *dpv1alpha1.SchedulePolicy) { + if src.Enabled != nil { + dst.Enabled = src.Enabled + } + if src.RetentionPeriod.String() != "" { + dst.RetentionPeriod = src.RetentionPeriod + } + if src.BackupMethod != "" { + dst.BackupMethod = src.BackupMethod + } + if src.CronExpression != "" { + dst.CronExpression = src.CronExpression + } +} diff --git a/controllers/apps/transformer_component_backup.go b/controllers/apps/transformer_component_backup.go deleted file mode 100644 index dbe1b912b99..00000000000 --- a/controllers/apps/transformer_component_backup.go +++ /dev/null @@ -1,576 +0,0 @@ -/* -Copyright (C) 2022-2023 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package apps - -import ( - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/controller/graph" -) - -// componentBackupPolicyTransformer transforms the backup policy template to the data protection backup policy and backup schedule. -type componentBackupPolicyTransformer struct { - *componentTransformContext - - tplCount int - tplIdentifier string - isDefaultTemplate string - - backupPolicyTpl *appsv1alpha1.BackupPolicyTemplate - backupPolicy *appsv1alpha1.BackupPolicy - compWorkloadType appsv1alpha1.WorkloadType -} - -var _ graph.Transformer = &componentBackupPolicyTransformer{} - -func (r *componentBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - return nil -} - -//// Transform transforms the backup policy template to the backup policy and backup schedule. -//func (r *componentBackupPolicyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { -// r.componentTransformContext = ctx.(*componentTransformContext) -// if model.IsObjectDeleting(r.componentTransformContext.ComponentOrig) { -// return nil -// } -// -// graphCli, _ := r.componentTransformContext.Client.(model.GraphClient) -// -// clusterDefName := r.ClusterDef.Name -// backupPolicyTpls := &appsv1alpha1.BackupPolicyTemplateList{} -// if err := r.Client.List(r.Context, backupPolicyTpls, -// client.MatchingLabels{constant.ClusterDefLabelKey: clusterDefName}); err != nil { -// return err -// } -// r.tplCount = len(backupPolicyTpls.Items) -// if r.tplCount == 0 { -// return nil -// } -// -// backupPolicyNames := map[string]struct{}{} -// backupScheduleNames := map[string]struct{}{} -// for _, tpl := range backupPolicyTpls.Items { -// r.isDefaultTemplate = tpl.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] -// r.tplIdentifier = tpl.Spec.Identifier -// r.backupPolicyTpl = &tpl -// -// for i, bp := range tpl.Spec.BackupPolicies { -// compDef := 
r.ClusterDef.GetComponentDefByName(bp.ComponentDefRef) -// if compDef == nil { -// return intctrlutil.NewNotFound("componentDef %s not found in ClusterDefinition: %s ", -// bp.ComponentDefRef, clusterDefName) -// } -// -// r.backupPolicy = &tpl.Spec.BackupPolicies[i] -// r.compWorkloadType = compDef.WorkloadType -// -// transformBackupPolicy := func() *dpv1alpha1.BackupPolicy { -// // build the data protection backup policy from the template. -// dpBackupPolicy, action := r.transformBackupPolicy() -// if dpBackupPolicy == nil { -// return nil -// } -// -// // if exist multiple backup policy templates and duplicate spec.identifier, -// // the generated backupPolicy may have duplicate names, so it is -// // necessary to check if it already exists. -// if _, ok := backupPolicyNames[dpBackupPolicy.Name]; ok { -// return dpBackupPolicy -// } -// -// switch *action { -// case model.CREATE: -// graphCli.Create(dag, dpBackupPolicy) -// case model.UPDATE: -// graphCli.Update(dag, dpBackupPolicy, dpBackupPolicy) -// } -// backupPolicyNames[dpBackupPolicy.Name] = struct{}{} -// return dpBackupPolicy -// } -// -// transformBackupSchedule := func(backupPolicy *dpv1alpha1.BackupPolicy) { -// // if backup policy is nil, it means that the backup policy template -// // is invalid, backup schedule depends on backup policy, so we do -// // not need to transform backup schedule. -// if backupPolicy == nil { -// return -// } -// -// // only create backup schedule for the default backup policy template -// // if there are more than one backup policy templates. -// if r.isDefaultTemplate != trueVal && r.tplCount > 1 { -// return -// } -// -// // build the data protection backup schedule from the template. -// dpBackupSchedule, action := r.transformBackupSchedule(backupPolicy) -// -// // merge cluster backup configuration into the backup schedule. -// // If the backup schedule is nil, create a new backup schedule -// // based on the cluster backup configuration. 
-// if dpBackupSchedule == nil { -// action = model.ActionCreatePtr() -// } else if action == nil { -// action = model.ActionUpdatePtr() -// } -// -// // for a cluster, the default backup schedule is created by backup -// // policy template, user can also configure cluster backup in the -// // cluster custom object, such as enable cluster backup, set backup -// // schedule, etc. -// // We always prioritize the cluster backup configuration in the -// // cluster object, so we need to merge the cluster backup configuration -// // into the default backup schedule created by backup policy template -// // if it exists. -// dpBackupSchedule = r.mergeClusterBackup(backupPolicy, dpBackupSchedule) -// if dpBackupSchedule == nil { -// return -// } -// -// // if exist multiple backup policy templates and duplicate spec.identifier, -// // the backupPolicy that may be generated may have duplicate names, -// // and it is necessary to check if it already exists. -// if _, ok := backupScheduleNames[dpBackupSchedule.Name]; ok { -// return -// } -// -// switch *action { -// case model.CREATE: -// graphCli.Create(dag, dpBackupSchedule) -// case model.UPDATE: -// graphCli.Update(dag, dpBackupSchedule, dpBackupSchedule) -// } -// graphCli.DependOn(dag, backupPolicy, dpBackupSchedule) -// backupScheduleNames[dpBackupSchedule.Name] = struct{}{} -// } -// -// // transform backup policy template to data protection backupPolicy -// // and backupSchedule -// policy := transformBackupPolicy() -// transformBackupSchedule(policy) -// } -// } -// return nil -//} -// -//// transformBackupPolicy transforms backup policy template to backup policy. 
-//func (r *componentBackupPolicyTransformer) transformBackupPolicy() (*dpv1alpha1.BackupPolicy, *model.Action) { -// cluster := r.OrigCluster -// backupPolicyName := generateBackupPolicyName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) -// backupPolicy := &dpv1alpha1.BackupPolicy{} -// if err := r.Client.Get(r.Context, client.ObjectKey{ -// Namespace: cluster.Namespace, -// Name: backupPolicyName, -// }, backupPolicy); client.IgnoreNotFound(err) != nil { -// return nil, nil -// } -// -// if len(backupPolicy.Name) == 0 { -// // build a new backup policy by the backup policy template. -// return r.buildBackupPolicy(backupPolicyName), model.ActionCreatePtr() -// } -// -// // sync the existing backup policy with the cluster changes -// r.syncBackupPolicy(backupPolicy) -// return backupPolicy, model.ActionUpdatePtr() -//} -// -//func (r *componentBackupPolicyTransformer) transformBackupSchedule( -// backupPolicy *dpv1alpha1.BackupPolicy) (*dpv1alpha1.BackupSchedule, *model.Action) { -// cluster := r.OrigCluster -// scheduleName := generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier) -// backupSchedule := &dpv1alpha1.BackupSchedule{} -// if err := r.Client.Get(r.Context, client.ObjectKey{ -// Namespace: cluster.Namespace, -// Name: scheduleName, -// }, backupSchedule); client.IgnoreNotFound(err) != nil { -// return nil, nil -// } -// -// if len(backupSchedule.Name) == 0 { -// // build a new backup schedule from the backup policy template. 
-// return r.buildBackupSchedule(scheduleName, backupPolicy), model.ActionCreatePtr() -// } -// // sync backup schedule -// r.syncBackupSchedule(backupSchedule) -// return backupSchedule, model.ActionUpdatePtr() -//} -// -//func (r *componentBackupPolicyTransformer) buildBackupSchedule( -// name string, -// backupPolicy *dpv1alpha1.BackupPolicy) *dpv1alpha1.BackupSchedule { -// cluster := r.OrigCluster -// backupSchedule := &dpv1alpha1.BackupSchedule{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: name, -// Namespace: cluster.Namespace, -// Labels: r.buildLabels(), -// Annotations: r.buildAnnotations(), -// }, -// Spec: dpv1alpha1.BackupScheduleSpec{ -// BackupPolicyName: backupPolicy.Name, -// }, -// } -// -// var schedules []dpv1alpha1.SchedulePolicy -// for _, s := range r.backupPolicy.Schedules { -// schedules = append(schedules, dpv1alpha1.SchedulePolicy{ -// BackupMethod: s.BackupMethod, -// CronExpression: s.CronExpression, -// Enabled: s.Enabled, -// RetentionPeriod: r.backupPolicy.RetentionPeriod, -// }) -// } -// backupSchedule.Spec.Schedules = schedules -// return backupSchedule -//} -// -//func (r *componentBackupPolicyTransformer) syncBackupSchedule(backupSchedule *dpv1alpha1.BackupSchedule) { -// scheduleMethodMap := map[string]struct{}{} -// for _, s := range backupSchedule.Spec.Schedules { -// scheduleMethodMap[s.BackupMethod] = struct{}{} -// } -// // sync the newly added schedule policies. -// for _, s := range r.backupPolicy.Schedules { -// if _, ok := scheduleMethodMap[s.BackupMethod]; ok { -// continue -// } -// backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, dpv1alpha1.SchedulePolicy{ -// BackupMethod: s.BackupMethod, -// CronExpression: s.CronExpression, -// Enabled: s.Enabled, -// RetentionPeriod: r.backupPolicy.RetentionPeriod, -// }) -// } -//} -// -//// syncBackupPolicy syncs labels and annotations of the backup policy with the cluster changes. 
-//func (r *componentBackupPolicyTransformer) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPolicy) { -// // update labels and annotations of the backup policy. -// if backupPolicy.Annotations == nil { -// backupPolicy.Annotations = map[string]string{} -// } -// if backupPolicy.Labels == nil { -// backupPolicy.Labels = map[string]string{} -// } -// mergeMap(backupPolicy.Annotations, r.buildAnnotations()) -// mergeMap(backupPolicy.Labels, r.buildLabels()) -// -// // update backup repo of the backup policy. -// if r.Cluster.Spec.Backup != nil && r.Cluster.Spec.Backup.RepoName != "" { -// backupPolicy.Spec.BackupRepoName = &r.Cluster.Spec.Backup.RepoName -// } -// -// r.syncBackupMethods(backupPolicy) -// -// // only update the role labelSelector of the backup target instance when -// // component workload is Replication/Consensus. Because the replicas of -// // component will change, such as 2->1. then if the target role is 'follower' -// // and replicas is 1, the target instance can not be found. so we sync the -// // label selector automatically. -// if !workloadHasRoleLabel(r.compWorkloadType) { -// return -// } -// -// comp := r.getClusterComponentSpec() -// if comp == nil { -// return -// } -// -// // convert role labelSelector based on the replicas of the component automatically. -// // TODO(ldm): need more review. 
-// role := r.backupPolicy.Target.Role -// if len(role) == 0 { -// return -// } -// -// podSelector := backupPolicy.Spec.Target.PodSelector -// if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil { -// podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} -// } -// if r.getCompReplicas() == 1 { -// delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey) -// } else { -// podSelector.LabelSelector.MatchLabels[constant.RoleLabelKey] = role -// } -//} -// -//func (r *componentBackupPolicyTransformer) getCompReplicas() int32 { -// rsm := &workloads.ReplicatedStateMachine{} -// compSpec := r.getClusterComponentSpec() -// rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) -// if err := r.Client.Get(r.Context, client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { -// return compSpec.Replicas -// } -// return *rsm.Spec.Replicas -//} -// -//// buildBackupPolicy builds a new backup policy by the backup policy template. -//func (r *componentBackupPolicyTransformer) buildBackupPolicy(backupPolicyName string) *dpv1alpha1.BackupPolicy { -// comp := r.getClusterComponentSpec() -// if comp == nil { -// return nil -// } -// -// cluster := r.OrigCluster -// backupPolicy := &dpv1alpha1.BackupPolicy{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: backupPolicyName, -// Namespace: cluster.Namespace, -// Labels: r.buildLabels(), -// Annotations: r.buildAnnotations(), -// }, -// } -// r.syncBackupMethods(backupPolicy) -// bpSpec := backupPolicy.Spec -// // if cluster have backup repo, set backup repo name to backup policy. 
-// if cluster.Spec.Backup != nil && cluster.Spec.Backup.RepoName != "" { -// bpSpec.BackupRepoName = &cluster.Spec.Backup.RepoName -// } -// bpSpec.PathPrefix = buildBackupPathPrefix(cluster, comp.Name) -// bpSpec.Target = r.buildBackupTarget(comp) -// backupPolicy.Spec = bpSpec -// return backupPolicy -//} -// -//// syncBackupMethods syncs the backupMethod of tpl to backupPolicy. -//func (r *componentBackupPolicyTransformer) syncBackupMethods(backupPolicy *dpv1alpha1.BackupPolicy) { -// var backupMethods []dpv1alpha1.BackupMethod -// for _, v := range r.backupPolicy.BackupMethods { -// mappingEnv := r.doEnvMapping(v.EnvMapping) -// v.BackupMethod.Env = dputils.MergeEnv(v.BackupMethod.Env, mappingEnv) -// backupMethods = append(backupMethods, v.BackupMethod) -// } -// backupPolicy.Spec.BackupMethods = backupMethods -//} -// -//func (r *componentBackupPolicyTransformer) doEnvMapping(envMapping []appsv1alpha1.EnvMappingVar) []corev1.EnvVar { -// var env []corev1.EnvVar -// for _, v := range envMapping { -// for _, cv := range v.ValueFrom.ClusterVersionRef { -// if !slices.Contains(cv.Names, r.Cluster.Spec.ClusterVersionRef) { -// continue -// } -// env = append(env, corev1.EnvVar{ -// Name: v.Key, -// Value: cv.MappingValue, -// }) -// } -// } -// return env -//} -// -//func (r *componentBackupPolicyTransformer) buildBackupTarget( -// comp *appsv1alpha1.ClusterComponentSpec) *dpv1alpha1.BackupTarget { -// targetTpl := r.backupPolicy.Target -// clusterName := r.OrigCluster.Name -// -// getSAName := func() string { -// if comp.ServiceAccountName != "" { -// return comp.ServiceAccountName -// } -// return "kb-" + r.Cluster.Name -// } -// -// // build the target connection credential -// cc := dpv1alpha1.ConnectionCredential{} -// if len(targetTpl.Account) > 0 { -// cc.SecretName = fmt.Sprintf("%s-%s-%s", clusterName, comp.Name, targetTpl.Account) -// cc.PasswordKey = constant.AccountPasswdForSecret -// cc.PasswordKey = constant.AccountNameForSecret -// } else { -// 
cc.SecretName = constant.GenerateDefaultConnCredential(clusterName) -// ccKey := targetTpl.ConnectionCredentialKey -// if ccKey.PasswordKey != nil { -// cc.PasswordKey = *ccKey.PasswordKey -// } -// if ccKey.UsernameKey != nil { -// cc.UsernameKey = *ccKey.UsernameKey -// } -// if ccKey.PortKey != nil { -// cc.PortKey = *ccKey.PortKey -// } -// if ccKey.HostKey != nil { -// cc.HostKey = *ccKey.HostKey -// } -// } -// -// target := &dpv1alpha1.BackupTarget{ -// PodSelector: &dpv1alpha1.PodSelector{ -// Strategy: dpv1alpha1.PodSelectionStrategyAny, -// LabelSelector: &metav1.LabelSelector{ -// MatchLabels: r.buildTargetPodLabels(comp), -// }, -// }, -// ConnectionCredential: &cc, -// ServiceAccountName: getSAName(), -// } -// return target -//} -// -//func (r *componentBackupPolicyTransformer) mergeClusterBackup( -// backupPolicy *dpv1alpha1.BackupPolicy, -// backupSchedule *dpv1alpha1.BackupSchedule) *dpv1alpha1.BackupSchedule { -// cluster := r.OrigCluster -// backupEnabled := func() bool { -// return cluster.Spec.Backup != nil && boolValue(cluster.Spec.Backup.Enabled) -// } -// -// if backupPolicy == nil || cluster.Spec.Backup == nil { -// // backup policy is nil, can not enable cluster backup, so record event and return. -// if backupEnabled() { -// r.EventRecorder.Event(r.Cluster, corev1.EventTypeWarning, -// "BackupPolicyNotFound", "backup policy is nil, can not enable cluster backup") -// } -// return backupSchedule -// } -// -// backup := cluster.Spec.Backup -// // there is no backup schedule created by backup policy template, so we need to -// // create a new backup schedule for cluster backup. 
-// if backupSchedule == nil { -// backupSchedule = &dpv1alpha1.BackupSchedule{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: generateBackupScheduleName(cluster.Name, r.backupPolicy.ComponentDefRef, r.tplIdentifier), -// Namespace: cluster.Namespace, -// Labels: r.buildLabels(), -// Annotations: r.buildAnnotations(), -// }, -// Spec: dpv1alpha1.BackupScheduleSpec{ -// BackupPolicyName: backupPolicy.Name, -// StartingDeadlineMinutes: backup.StartingDeadlineMinutes, -// Schedules: []dpv1alpha1.SchedulePolicy{}, -// }, -// } -// } -// -// // build backup schedule policy by cluster backup spec -// sp := &dpv1alpha1.SchedulePolicy{ -// Enabled: backup.Enabled, -// RetentionPeriod: backup.RetentionPeriod, -// BackupMethod: backup.Method, -// CronExpression: backup.CronExpression, -// } -// -// // merge cluster backup schedule policy into backup schedule, if the backup -// // schedule with specified method already exists, we need to update it -// // using the cluster backup schedule policy. Otherwise, we need to append -// // it to the backup schedule. -// for i, s := range backupSchedule.Spec.Schedules { -// if s.BackupMethod == backup.Method { -// mergeSchedulePolicy(sp, &backupSchedule.Spec.Schedules[i]) -// return backupSchedule -// } -// } -// backupSchedule.Spec.Schedules = append(backupSchedule.Spec.Schedules, *sp) -// return backupSchedule -//} -// -//// getClusterComponentSpec returns the first component name of the componentDefRef. 
-//func (r *componentBackupPolicyTransformer) getClusterComponentSpec() *appsv1alpha1.ClusterComponentSpec { -// for _, v := range r.componentTransformContext.ComponentSpecs { -// if v.ComponentDefRef == r.backupPolicy.ComponentDefRef { -// return v -// } -// } -// return nil -//} -// -//func (r *componentBackupPolicyTransformer) defaultPolicyAnnotationValue() string { -// if r.tplCount > 1 && r.isDefaultTemplate != trueVal { -// return "false" -// } -// return trueVal -//} -// -//func (r *componentBackupPolicyTransformer) buildAnnotations() map[string]string { -// annotations := map[string]string{ -// dptypes.DefaultBackupPolicyAnnotationKey: r.defaultPolicyAnnotationValue(), -// constant.BackupPolicyTemplateAnnotationKey: r.backupPolicyTpl.Name, -// } -// if r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] != "" { -// annotations[dptypes.ReconfigureRefAnnotationKey] = r.backupPolicyTpl.Annotations[dptypes.ReconfigureRefAnnotationKey] -// } -// return annotations -//} -// -//func (r *componentBackupPolicyTransformer) buildLabels() map[string]string { -// return map[string]string{ -// constant.AppInstanceLabelKey: r.OrigCluster.Name, -// constant.KBAppComponentDefRefLabelKey: r.backupPolicy.ComponentDefRef, -// constant.AppManagedByLabelKey: constant.AppName, -// } -//} -// -//// buildTargetPodLabels builds the target labels for the backup policy that will be -//// used to select the target pod. -//func (r *componentBackupPolicyTransformer) buildTargetPodLabels(comp *appsv1alpha1.ClusterComponentSpec) map[string]string { -// labels := map[string]string{ -// constant.AppInstanceLabelKey: r.OrigCluster.Name, -// constant.KBAppComponentLabelKey: comp.Name, -// constant.AppManagedByLabelKey: constant.AppName, -// } -// // append label to filter specific role of the component. 
-// targetTpl := &r.backupPolicy.Target -// if workloadHasRoleLabel(r.compWorkloadType) && -// len(targetTpl.Role) > 0 && r.getCompReplicas() > 1 { -// // the role only works when the component has multiple replicas. -// labels[constant.RoleLabelKey] = targetTpl.Role -// } -// return labels -//} -// -//// generateBackupPolicyName generates the backup policy name which is created from backup policy template. -//func generateBackupPolicyName(clusterName, componentDef, identifier string) string { -// if len(identifier) == 0 { -// return fmt.Sprintf("%s-%s-backup-policy", clusterName, componentDef) -// } -// return fmt.Sprintf("%s-%s-backup-policy-%s", clusterName, componentDef, identifier) -//} -// -//// generateBackupScheduleName generates the backup schedule name which is created from backup policy template. -//func generateBackupScheduleName(clusterName, componentDef, identifier string) string { -// if len(identifier) == 0 { -// return fmt.Sprintf("%s-%s-backup-schedule", clusterName, componentDef) -// } -// return fmt.Sprintf("%s-%s-backup-schedule-%s", clusterName, componentDef, identifier) -//} -// -//func buildBackupPathPrefix(cluster *appsv1alpha1.Cluster, compName string) string { -// return fmt.Sprintf("/%s-%s/%s", cluster.Name, cluster.UID, compName) -//} -// -//func workloadHasRoleLabel(workloadType appsv1alpha1.WorkloadType) bool { -// return slices.Contains([]appsv1alpha1.WorkloadType{appsv1alpha1.Replication, appsv1alpha1.Consensus}, workloadType) -//} -// -//func mergeSchedulePolicy(src *dpv1alpha1.SchedulePolicy, dst *dpv1alpha1.SchedulePolicy) { -// if src.Enabled != nil { -// dst.Enabled = src.Enabled -// } -// if src.RetentionPeriod.String() != "" { -// dst.RetentionPeriod = src.RetentionPeriod -// } -// if src.BackupMethod != "" { -// dst.BackupMethod = src.BackupMethod -// } -// if src.CronExpression != "" { -// dst.CronExpression = src.CronExpression -// } -//}