diff --git a/Makefile b/Makefile index 0edc7cf..7c507c3 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ $(LOCALBIN): CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ## Tool Versions -CONTROLLER_TOOLS_VERSION ?= v0.9.2 +CONTROLLER_TOOLS_VERSION ?= v0.14.0 .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. diff --git a/go.mod b/go.mod index e9b2305..6c14503 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,15 @@ module github.com/sap/component-operator-runtime -go 1.21.7 +go 1.22.1 require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/hashicorp/go-multierror v1.1.1 + github.com/iancoleman/strcase v0.3.0 github.com/pkg/errors v0.9.1 - github.com/sap/go-generics v0.2.0 + github.com/sap/go-generics v0.2.3 github.com/spf13/pflag v1.0.5 + golang.org/x/time v0.5.0 k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.2 k8s.io/apimachinery v0.29.2 @@ -71,7 +73,6 @@ require ( golang.org/x/sys v0.17.0 // indirect golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.5.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.32.0 // indirect diff --git a/go.sum b/go.sum index a27ef20..06295b3 100644 --- a/go.sum +++ b/go.sum @@ -68,6 +68,8 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 
h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -98,8 +100,8 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= +github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -116,8 +118,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/sap/go-generics v0.2.0 h1:uXjK6eZDj4XFe52KiMfX7YsHJ+YyOrhUgohe1hNT/78= -github.com/sap/go-generics v0.2.0/go.mod h1:LPjEUR4matw9C7GZdHYMExVN8+LeNK5LmrL24JKr8eg= +github.com/sap/go-generics v0.2.3 h1:cEY63YaVIqvOu2347drCilMvdgM1p2we2QwY4k/Nas0= +github.com/sap/go-generics v0.2.3/go.mod h1:eBhccCEzOiM5dn1W2kupUMOAm4uS9CfKHzQsDlZHQzc= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= 
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= diff --git a/internal/backoff/backoff.go b/internal/backoff/backoff.go index 3fea289..a74d2e3 100644 --- a/internal/backoff/backoff.go +++ b/internal/backoff/backoff.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "golang.org/x/time/rate" "k8s.io/client-go/util/workqueue" ) @@ -21,7 +22,14 @@ type Backoff struct { func NewBackoff(maxDelay time.Duration) *Backoff { return &Backoff{ activities: make(map[any]any), - limiter: workqueue.NewItemExponentialFailureRateLimiter(20*time.Millisecond, maxDelay), + // resulting per-item backoff is the maximum of a 300-times-20ms-then-maxDelay per-item limiter, + // and an overall 10-per-second-burst-20 bucket limiter; + // as a consequence, we have up to 20 almost immediate retries, then a phase of 10 retries per seconnd + // for approximately 30s, and then slow retries at the rate given by maxDelay + limiter: workqueue.NewMaxOfRateLimiter( + workqueue.NewItemFastSlowRateLimiter(20*time.Millisecond, maxDelay, 300), + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 20)}, + ), } } diff --git a/internal/kstatus/analyzer.go b/internal/kstatus/analyzer.go new file mode 100644 index 0000000..22c8985 --- /dev/null +++ b/internal/kstatus/analyzer.go @@ -0,0 +1,115 @@ +/* +SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and component-operator-runtime contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +package kstatus + +import ( + "strings" + + "github.com/iancoleman/strcase" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" + + "github.com/sap/component-operator-runtime/pkg/types" +) + +const conditionTypeReady = "Ready" + +type statusAnalyzer struct { + 
reconcilerName string +} + +func NewStatusAnalyzer(reconcilerName string) StatusAnalyzer { + return &statusAnalyzer{ + reconcilerName: reconcilerName, + } +} + +func (s *statusAnalyzer) ComputeStatus(object *unstructured.Unstructured) (Status, error) { + if hint, ok := object.GetAnnotations()[s.reconcilerName+"/"+types.AnnotationKeySuffixStatusHint]; ok { + object = object.DeepCopy() + + for _, hint := range strings.Split(hint, ",") { + switch strcase.ToKebab(hint) { + case types.StatusHintHasObservedGeneration: + _, found, err := unstructured.NestedInt64(object.Object, "status", "observedGeneration") + if err != nil { + return UnknownStatus, err + } + if !found { + if err := unstructured.SetNestedField(object.Object, -1, "status", "observedGeneration"); err != nil { + return UnknownStatus, err + } + } + case types.StatusHintHasReadyCondition: + foundReadyCondition := false + conditions, found, err := unstructured.NestedSlice(object.Object, "status", "conditions") + if err != nil { + return UnknownStatus, err + } + if !found { + conditions = make([]any, 0) + } + for _, condition := range conditions { + if condition, ok := condition.(map[string]any); ok { + condType, found, err := unstructured.NestedString(condition, "type") + if err != nil { + return UnknownStatus, err + } + if found && condType == conditionTypeReady { + foundReadyCondition = true + break + } + } + } + if !foundReadyCondition { + conditions = append(conditions, map[string]any{ + "type": conditionTypeReady, + "status": string(corev1.ConditionUnknown), + }) + if err := unstructured.SetNestedSlice(object.Object, conditions, "status", "conditions"); err != nil { + return UnknownStatus, err + } + } + } + } + } + + res, err := kstatus.Compute(object) + if err != nil { + return UnknownStatus, err + } + + switch object.GroupVersionKind() { + case schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}: + // other than kstatus we want to consider jobs as InProgress if its pods are still running, 
resp. did not (yet) finish successfully + if res.Status == kstatus.CurrentStatus { + done := false + objc, err := kstatus.GetObjectWithConditions(object.UnstructuredContent()) + if err != nil { + return UnknownStatus, err + } + for _, cond := range objc.Status.Conditions { + if cond.Type == string(batchv1.JobComplete) && cond.Status == corev1.ConditionTrue { + done = true + break + } + if cond.Type == string(batchv1.JobFailed) && cond.Status == corev1.ConditionTrue { + done = true + break + } + } + if !done { + res.Status = kstatus.InProgressStatus + } + } + } + + return Status(res.Status), nil +} diff --git a/internal/kstatus/status.go b/internal/kstatus/status.go new file mode 100644 index 0000000..dad03a1 --- /dev/null +++ b/internal/kstatus/status.go @@ -0,0 +1,10 @@ +/* +SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and component-operator-runtime contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +package kstatus + +func (s Status) String() string { + return string(s) +} diff --git a/internal/kstatus/types.go b/internal/kstatus/types.go new file mode 100644 index 0000000..5fcf5b9 --- /dev/null +++ b/internal/kstatus/types.go @@ -0,0 +1,29 @@ +/* +SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and component-operator-runtime contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +package kstatus + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +// TODO: the StatusAnalyzer interface should be public. + +// The StatusAnalyzer interface models types which allow to extract a kstatus-compatible status from an object. 
+type StatusAnalyzer interface { + ComputeStatus(object *unstructured.Unstructured) (Status, error) +} + +type Status kstatus.Status + +const ( + InProgressStatus Status = Status(kstatus.InProgressStatus) + FailedStatus Status = Status(kstatus.FailedStatus) + CurrentStatus Status = Status(kstatus.CurrentStatus) + TerminatingStatus Status = Status(kstatus.TerminatingStatus) + NotFoundStatus Status = Status(kstatus.NotFoundStatus) + UnknownStatus Status = Status(kstatus.UnknownStatus) +) diff --git a/pkg/component/reconcile.go b/pkg/component/reconcile.go index b3c9361..de072b7 100644 --- a/pkg/component/reconcile.go +++ b/pkg/component/reconcile.go @@ -32,6 +32,7 @@ import ( "github.com/sap/component-operator-runtime/internal/backoff" "github.com/sap/component-operator-runtime/internal/cluster" + "github.com/sap/component-operator-runtime/internal/kstatus" "github.com/sap/component-operator-runtime/pkg/manifests" "github.com/sap/component-operator-runtime/pkg/types" ) @@ -86,86 +87,13 @@ type ReconcilerOptions struct { SchemeBuilder types.SchemeBuilder } -// AdoptionPolicy defines how the reconciler reacts if a dependent object exists but has no or a different owner. -type AdoptionPolicy string - -const ( - // Fail if the dependent object exists but has no or a different owner. - AdoptionPolicyNever AdoptionPolicy = "Never" - // Adopt existing dependent objects if they have no owner set. - AdoptionPolicyIfUnowned AdoptionPolicy = "IfUnowned" - // Adopt existing dependent objects, even if they have a conflicting owner. - AdoptionPolicyAlways AdoptionPolicy = "Always" -) - -var adoptionPolicyByAnnotation = map[string]AdoptionPolicy{ - types.AdoptionPolicyNever: AdoptionPolicyNever, - types.AdoptionPolicyIfUnowned: AdoptionPolicyIfUnowned, - types.AdoptionPolicyAlways: AdoptionPolicyAlways, -} - -// ReconcilePolicy defines when the reconciler will reconcile the dependent object. 
-type ReconcilePolicy string - -const ( - // Reconcile the dependent object if its manifest, as produced by the generator, changes. - ReconcilePolicyOnObjectChange ReconcilePolicy = "OnObjectChange" - // Reconcile the dependent object if its manifest, as produced by the generator, changes, or if the owning - // component changes (identified by a change of its metadata.generation). - ReconcilePolicyOnObjectOrComponentChange ReconcilePolicy = "OnObjectOrComponentChange" - // Reconcile the dependent object only once; afterwards it will never be touched again by the reconciler. - ReconcilePolicyOnce ReconcilePolicy = "Once" -) - -var reconcilePolicyByAnnotation = map[string]ReconcilePolicy{ - types.ReconcilePolicyOnObjectChange: ReconcilePolicyOnObjectChange, - types.ReconcilePolicyOnObjectOrComponentChange: ReconcilePolicyOnObjectOrComponentChange, - types.ReconcilePolicyOnce: ReconcilePolicyOnce, -} - -// UpdatePolicy defines how the reconciler will update dependent objects. -type UpdatePolicy string - -const ( - // Recreate (that is: delete and create) existing dependent objects. - UpdatePolicyRecreate UpdatePolicy = "Recreate" - // Replace existing dependent objects. - UpdatePolicyReplace UpdatePolicy = "Replace" - // Use server side apply to update existing dependents. - UpdatePolicySsaMerge UpdatePolicy = "SsaMerge" - // Use server side apply to update existing dependents and, in addition, reclaim fields owned by certain - // field owners, such as kubectl or helm. - UpdatePolicySsaOverride UpdatePolicy = "SsaOverride" -) - -var updatePolicyByAnnotation = map[string]UpdatePolicy{ - types.UpdatePolicyRecreate: UpdatePolicyRecreate, - types.UpdatePolicyReplace: UpdatePolicyReplace, - types.UpdatePolicySsaMerge: UpdatePolicySsaMerge, - types.UpdatePolicySsaOverride: UpdatePolicySsaOverride, -} - -// DeletePolicy defines how the reconciler will delete dependent objects. -type DeletePolicy string - -const ( - // Delete dependent objects. 
- DeletePolicyDelete DeletePolicy = "Delete" - // Orphan dependent objects. - DeletePolicyOrphan DeletePolicy = "Orphan" -) - -var deletePolicyByAnnotation = map[string]DeletePolicy{ - types.DeletePolicyDelete: DeletePolicyDelete, - types.DeletePolicyOrphan: DeletePolicyOrphan, -} - // Reconciler provides the implementation of controller-runtime's Reconciler interface, for a given Component type T. type Reconciler[T Component] struct { name string id string client cluster.Client resourceGenerator manifests.Generator + statusAnalyzer kstatus.StatusAnalyzer options ReconcilerOptions clients *cluster.ClientFactory backoff *backoff.Backoff @@ -196,6 +124,7 @@ func NewReconciler[T Component](name string, resourceGenerator manifests.Generat return &Reconciler[T]{ name: name, resourceGenerator: resourceGenerator, + statusAnalyzer: kstatus.NewStatusAnalyzer(name), options: options, backoff: backoff.NewBackoff(10 * time.Second), postReadHooks: []HookFunc[T]{resolveReferences[T]}, @@ -249,6 +178,11 @@ func (r *Reconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (result // always attempt to update the status skipStatusUpdate := false defer func() { + if r := recover(); r != nil { + log.Error(fmt.Errorf("panic occurred during reconcile"), "panic", r) + // re-panic in order skip the remaining steps + panic(r) + } log.V(1).Info("reconcile done", "withError", err != nil, "requeue", result.Requeue || result.RequeueAfter > 0, "requeueAfter", result.RequeueAfter.String()) if status.State == StateReady || err != nil { r.backoff.Forget(req) @@ -314,7 +248,7 @@ func (r *Reconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (result if err != nil { return ctrl.Result{}, errors.Wrap(err, "error getting client for component") } - target := newReconcileTarget[T](r.name, r.id, targetClient, r.resourceGenerator, *r.options.CreateMissingNamespaces, *r.options.AdoptionPolicy, *r.options.UpdatePolicy) + target := newReconcileTarget[T](r.name, r.id, targetClient, 
r.resourceGenerator, r.statusAnalyzer, *r.options.CreateMissingNamespaces, *r.options.AdoptionPolicy, *r.options.UpdatePolicy) hookCtx := newContext(ctx).WithClient(targetClient) // do the reconciliation diff --git a/pkg/component/status.go b/pkg/component/status.go deleted file mode 100644 index d355ac0..0000000 --- a/pkg/component/status.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and component-operator-runtime contributors -SPDX-License-Identifier: Apache-2.0 -*/ - -package component - -import ( - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" -) - -// wrapper around kstatus.Compute, allowing us to modify kstatus's view for certain objects -func computeStatus(obj *unstructured.Unstructured) (*kstatus.Result, error) { - res, err := kstatus.Compute(obj) - if err != nil { - return nil, err - } - switch obj.GroupVersionKind() { - case schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}: - // other than kstatus we want to consider jobs as InProgress if its pods are still running, resp. 
did not (yet) finish successfully - if res.Status == kstatus.CurrentStatus { - done := false - objc, err := kstatus.GetObjectWithConditions(obj.UnstructuredContent()) - if err != nil { - return nil, err - } - for _, cond := range objc.Status.Conditions { - if cond.Type == string(batchv1.JobComplete) && cond.Status == corev1.ConditionTrue { - done = true - break - } - if cond.Type == string(batchv1.JobFailed) && cond.Status == corev1.ConditionTrue { - done = true - break - } - } - if !done { - res.Status = kstatus.InProgressStatus - } - } - } - return res, nil -} diff --git a/pkg/component/target.go b/pkg/component/target.go index 378569f..7b4b218 100644 --- a/pkg/component/target.go +++ b/pkg/component/target.go @@ -13,6 +13,7 @@ import ( "strconv" "strings" + "github.com/iancoleman/strcase" "github.com/pkg/errors" "github.com/sap/go-generics/slices" @@ -27,13 +28,13 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" apitypes "k8s.io/apimachinery/pkg/types" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/sap/component-operator-runtime/internal/cluster" + "github.com/sap/component-operator-runtime/internal/kstatus" "github.com/sap/component-operator-runtime/pkg/manifests" "github.com/sap/component-operator-runtime/pkg/types" ) @@ -52,51 +53,85 @@ const ( scopeCluster ) +const ( + minOrder = math.MinInt16 + maxOrder = math.MaxInt16 +) + +var adoptionPolicyByAnnotation = map[string]AdoptionPolicy{ + types.AdoptionPolicyNever: AdoptionPolicyNever, + types.AdoptionPolicyIfUnowned: AdoptionPolicyIfUnowned, + types.AdoptionPolicyAlways: AdoptionPolicyAlways, +} + +var reconcilePolicyByAnnotation = map[string]ReconcilePolicy{ + types.ReconcilePolicyOnObjectChange: 
ReconcilePolicyOnObjectChange, + types.ReconcilePolicyOnObjectOrComponentChange: ReconcilePolicyOnObjectOrComponentChange, + types.ReconcilePolicyOnce: ReconcilePolicyOnce, +} + +var updatePolicyByAnnotation = map[string]UpdatePolicy{ + types.UpdatePolicyRecreate: UpdatePolicyRecreate, + types.UpdatePolicyReplace: UpdatePolicyReplace, + types.UpdatePolicySsaMerge: UpdatePolicySsaMerge, + types.UpdatePolicySsaOverride: UpdatePolicySsaOverride, +} + +var deletePolicyByAnnotation = map[string]DeletePolicy{ + types.DeletePolicyDelete: DeletePolicyDelete, + types.DeletePolicyOrphan: DeletePolicyOrphan, +} + type reconcileTarget[T Component] struct { reconcilerName string reconcilerId string client cluster.Client resourceGenerator manifests.Generator + statusAnalyzer kstatus.StatusAnalyzer createMissingNamespaces bool adoptionPolicy AdoptionPolicy reconcilePolicy ReconcilePolicy updatePolicy UpdatePolicy deletePolicy DeletePolicy labelKeyOwnerId string + annotationKeyOwnerId string annotationKeyDigest string annotationKeyAdoptionPolicy string annotationKeyReconcilePolicy string annotationKeyUpdatePolicy string annotationKeyDeletePolicy string - annotationKeyOrder string + annotationKeyApplyOrder string annotationKeyPurgeOrder string - annotationKeyOwnerId string + annotationKeyDeleteOrder string } -func newReconcileTarget[T Component](reconcilerName string, reconcilerId string, clnt cluster.Client, resourceGenerator manifests.Generator, createMissingNamespaces bool, adoptionPolicy AdoptionPolicy, updatePolicy UpdatePolicy) *reconcileTarget[T] { +func newReconcileTarget[T Component](reconcilerName string, reconcilerId string, clnt cluster.Client, resourceGenerator manifests.Generator, statusAnalyzer kstatus.StatusAnalyzer, createMissingNamespaces bool, adoptionPolicy AdoptionPolicy, updatePolicy UpdatePolicy) *reconcileTarget[T] { return &reconcileTarget[T]{ reconcilerName: reconcilerName, reconcilerId: reconcilerId, client: clnt, resourceGenerator: resourceGenerator, + 
statusAnalyzer: statusAnalyzer, createMissingNamespaces: createMissingNamespaces, adoptionPolicy: adoptionPolicy, reconcilePolicy: ReconcilePolicyOnObjectChange, updatePolicy: updatePolicy, deletePolicy: DeletePolicyDelete, labelKeyOwnerId: reconcilerName + "/" + types.LabelKeySuffixOwnerId, + annotationKeyOwnerId: reconcilerName + "/" + types.AnnotationKeySuffixOwnerId, annotationKeyDigest: reconcilerName + "/" + types.AnnotationKeySuffixDigest, annotationKeyAdoptionPolicy: reconcilerName + "/" + types.AnnotationKeySuffixAdoptionPolicy, annotationKeyReconcilePolicy: reconcilerName + "/" + types.AnnotationKeySuffixReconcilePolicy, annotationKeyUpdatePolicy: reconcilerName + "/" + types.AnnotationKeySuffixUpdatePolicy, annotationKeyDeletePolicy: reconcilerName + "/" + types.AnnotationKeySuffixDeletePolicy, - annotationKeyOrder: reconcilerName + "/" + types.AnnotationKeySuffixOrder, + annotationKeyApplyOrder: reconcilerName + "/" + types.AnnotationKeySuffixApplyOrder, annotationKeyPurgeOrder: reconcilerName + "/" + types.AnnotationKeySuffixPurgeOrder, - annotationKeyOwnerId: reconcilerName + "/" + types.AnnotationKeySuffixOwnerId, + annotationKeyDeleteOrder: reconcilerName + "/" + types.AnnotationKeySuffixDeleteOrder, } } func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, error) { + log := log.FromContext(ctx) namespace := "" name := "" if placementConfiguration, ok := assertPlacementConfiguration(component); ok { @@ -220,6 +255,7 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } for _, crd := range getCrds(objects) { if crd.Spec.Group == gvk.Group && crd.Spec.Names.Kind == gvk.Kind { + // TODO: validate that scope obtained from crd matches scope from rest mapping (if one was found there) scope = scopeFromCrd(crd) err = nil break @@ -238,7 +274,19 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, if object.GetNamespace() == "" && scope == scopeNamespaced { 
object.SetNamespace(namespace) } + if object.GetNamespace() != "" && scope == scopeCluster { + object.SetNamespace("") + } } + // note: after this point there still can be objects in the list which + // - have a namespace set although they are not namespaced + // - do not have a namespace set although they are namespaced + // which exactly happens if + // 1. the generator provided wrong information and + // 2. calling RESTMapping() above returned a NoMatchError (i.e. the type is currently not known to the api server) and + // 3. the type belongs to a (new) api service which is part of this component + // such entries can cause trouble, e.g. because InventoryItem.Match() might not work reliably ... + // TODO: should we allow at all that api services and according instances are part of the same component? // validate annotations for _, object := range objects { @@ -254,37 +302,80 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, if _, err := t.getDeletePolicy(object); err != nil { return false, errors.Wrapf(err, "error validating object %s", types.ObjectKeyToString(object)) } - if _, err := t.getOrder(object); err != nil { + if _, err := t.getApplyOrder(object); err != nil { return false, errors.Wrapf(err, "error validating object %s", types.ObjectKeyToString(object)) } if _, err := t.getPurgeOrder(object); err != nil { return false, errors.Wrapf(err, "error validating object %s", types.ObjectKeyToString(object)) } + if _, err := t.getDeleteOrder(object); err != nil { + return false, errors.Wrapf(err, "error validating object %s", types.ObjectKeyToString(object)) + } + // TODO: should status-hint be validated here as well? 
} // define getter functions for later usage - getOrder := func(object client.Object) int { + getAdoptionPolicy := func(object client.Object) AdoptionPolicy { + // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only + return must(t.getAdoptionPolicy(object)) + } + getReconcilePolicy := func(object client.Object) ReconcilePolicy { + // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only + return must(t.getReconcilePolicy(object)) + } + getUpdatePolicy := func(object client.Object) UpdatePolicy { + // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only + return must(t.getUpdatePolicy(object)) + } + getDeletePolicy := func(object client.Object) DeletePolicy { + // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only + return must(t.getDeletePolicy(object)) + } + getApplyOrder := func(object client.Object) int { // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only - return must(t.getOrder(object)) + return must(t.getApplyOrder(object)) } getPurgeOrder := func(object client.Object) int { // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only return must(t.getPurgeOrder(object)) } + getDeleteOrder := func(object client.Object) int { + // note: this must() is ok because we checked the generated objects above, and this function will be called for these objects only + return must(t.getDeleteOrder(object)) + } + + // perform further validations of object set + for _, object := range objects { + switch { + case isNamespace(object): + if getPurgeOrder(object) <= maxOrder { + return false, errors.Wrapf(fmt.Errorf("namespaces must 
not define a purge order"), "error validating object %s", types.ObjectKeyToString(object)) + } + case isCrd(object): + if getPurgeOrder(object) <= maxOrder { + return false, errors.Wrapf(fmt.Errorf("custom resource definitions must not define a purge order"), "error validating object %s", types.ObjectKeyToString(object)) + } + case isApiService(object): + if getPurgeOrder(object) <= maxOrder { + return false, errors.Wrapf(fmt.Errorf("api services must not define a purge order"), "error validating object %s", types.ObjectKeyToString(object)) + } + } + } // add/update inventory with target objects + // TODO: review this; it would be cleaner to use a DeepCopy method for a []*InventoryItem type (if there would be such a type) + inventory := slices.Collect(status.Inventory, func(item *InventoryItem) *InventoryItem { return item.DeepCopy() }) numAdded := 0 for _, object := range objects { // retrieve inventory item belonging to this object (if existing) - item := getItem(status.Inventory, object) + item := getItem(inventory, object) // calculate object digest digest, err := calculateObjectDigest(object) if err != nil { return false, errors.Wrapf(err, "error calculating digest for object %s", types.ObjectKeyToString(object)) } - // note: this must() is ok because we checked the generated objects above - switch must(t.getReconcilePolicy(object)) { + switch getReconcilePolicy(object) { case ReconcilePolicyOnObjectOrComponentChange: digest = fmt.Sprintf("%s@%d", digest, component.GetGeneration()) case ReconcilePolicyOnce: @@ -300,9 +391,9 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, return false, errors.Wrapf(err, "error reading object %s", types.ObjectKeyToString(object)) } // check ownership + // note: failing already here in case of a conflict prevents problems during apply and, in particular, during deletion if existingObject != nil { - // note: this must() is ok because we checked the generated objects above - adoptionPolicy := 
must(t.getAdoptionPolicy(object)) + adoptionPolicy := getAdoptionPolicy(object) existingOwnerId := existingObject.GetLabels()[t.labelKeyOwnerId] if existingOwnerId == "" { if adoptionPolicy != AdoptionPolicyIfUnowned && adoptionPolicy != AdoptionPolicyAlways { @@ -314,20 +405,26 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } } } - status.Inventory = append(status.Inventory, &InventoryItem{}) - item = status.Inventory[len(status.Inventory)-1] + inventory = append(inventory, &InventoryItem{}) + item = inventory[len(inventory)-1] numAdded++ } // update item + gvk := object.GetObjectKind().GroupVersionKind() + item.Group = gvk.Group + item.Version = gvk.Version + item.Kind = gvk.Kind + item.Namespace = object.GetNamespace() + item.Name = object.GetName() + item.AdoptionPolicy = getAdoptionPolicy(object) + item.ReconcilePolicy = getReconcilePolicy(object) + item.UpdatePolicy = getUpdatePolicy(object) + item.DeletePolicy = getDeletePolicy(object) + item.ApplyOrder = getApplyOrder(object) + item.DeleteOrder = getDeleteOrder(object) + item.ManagedTypes = getManagedTypes(object) if digest != item.Digest { - gvk := object.GetObjectKind().GroupVersionKind() - item.Group = gvk.Group - item.Version = gvk.Version - item.Kind = gvk.Kind - item.Namespace = object.GetNamespace() - item.Name = object.GetName() - item.ManagedTypes = getManagedTypes(object) item.Digest = digest item.Phase = PhaseScheduledForApplication item.Status = kstatus.InProgressStatus.String() @@ -335,7 +432,7 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } // mark obsolete inventory items (clear digest) - for _, item := range status.Inventory { + for _, item := range inventory { found := false for _, object := range objects { if item.Matches(object) { @@ -350,31 +447,81 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } } - // trigger another reconcile + // validate object set: + // - check that all 
managed instances have apply-order greater than or equal to the according managed type + // - check that all managed instances have delete-order less than or equal to the according managed type + // - check that no managed types are about to be deleted (empty digest) unless all related managed instances are as well + // - check that all contained objects have apply-order greater than or equal to the according namespace + // - check that all contained objects have delete-order less than or equal to the according namespace + // - check that no namespaces are about to be deleted (empty digest) unless all contained objects are as well + for _, item := range inventory { + if isCrd(item) || isApiService(item) { + for _, _item := range inventory { + if isManagedBy(item, _item) { + if _item.ApplyOrder < item.ApplyOrder { + return false, fmt.Errorf("error validating object set (%s): managed instance must not have an apply order lesser than the one of its type", _item) + } + if _item.DeleteOrder > item.DeleteOrder { + return false, fmt.Errorf("error validating object set (%s): managed instance must not have a delete order greater than the one of its type", _item) + } + if _item.Digest != "" && item.Digest == "" { + return false, fmt.Errorf("error validating object set (%s): managed instance is not being deleted, but the managing type is", _item) + } + } + } + } + if isNamespace(item) { + for _, _item := range inventory { + if _item.Namespace == item.Name { + if _item.ApplyOrder < item.ApplyOrder { + return false, fmt.Errorf("error validating object set (%s): namespaced object must not have an apply order lesser than the one of its namespace", _item) + } + if _item.DeleteOrder > item.DeleteOrder { + return false, fmt.Errorf("error validating object set (%s): namespaced object must not have a delete order greater than the one of its namespace", _item) + } + if _item.Digest != "" && item.Digest == "" { + return false, fmt.Errorf("error validating object set (%s): 
namespaced object is not being deleted, but the namespace is", _item) + } + } + } + } + + // accept inventory for further processing, put into right order for future deletion + status.Inventory = sortObjectsForDelete(inventory) + + // trigger another reconcile if something was added (to be sure that it is persisted) if numAdded > 0 { - // put inventory into right order for future deletion - status.Inventory = sortObjectsForDelete(status.Inventory) return false, nil } - // note: after this point it is guaranteed that the persisted inventory reflects the target state + // note: after this point it is guaranteed that + // - the in-memory inventory reflects the target state + // - the persisted inventory at least has the same object keys as the in-memory inventory // now it is about to synchronize the cluster state with the inventory - // TODO: delete-order - // count instances of managed types which are about to be deleted + // delete redundant objects and maintain inventory; + // objects are deleted in waves according to their delete order; + // that means, only if all redundant objects of a wave are gone or completed, the next + // wave will be processed; within each wave, objects which are instances of managed + // types are deleted before all other objects, and namespaces will only be deleted + // if they are not used by any object in the inventory (note that this may cause deadlocks) numManagedToBeDeleted := 0 - for _, item := range status.Inventory { - if item.Phase == PhaseScheduledForDeletion || item.Phase == PhaseScheduledForCompletion || item.Phase == PhaseDeleting || item.Phase == PhaseCompleting { - if isManaged(status.Inventory, item) { - numManagedToBeDeleted++ + numToBeDeleted := 0 + for k, item := range status.Inventory { + // if this is the first object of an order, then + // count instances of managed types in this wave which are about to be deleted + if k == 0 || status.Inventory[k-1].DeleteOrder < item.DeleteOrder { + log.V(2).Info("begin of 
deletion wave", "order", item.DeleteOrder) + numManagedToBeDeleted = 0 + for j := k; j < len(status.Inventory) && status.Inventory[j].DeleteOrder == item.DeleteOrder; j++ { + _item := status.Inventory[j] + if (_item.Phase == PhaseScheduledForDeletion || _item.Phase == PhaseScheduledForCompletion || _item.Phase == PhaseDeleting || _item.Phase == PhaseCompleting) && isInstanceOfManagedType(status.Inventory, _item) { + numManagedToBeDeleted++ + } } } - } - // delete redundant objects and maintain inventory - numToBeDeleted := 0 - var inventory []*InventoryItem - for _, item := range status.Inventory { if item.Phase == PhaseScheduledForDeletion || item.Phase == PhaseScheduledForCompletion || item.Phase == PhaseDeleting || item.Phase == PhaseCompleting { // fetch object (if existing) existingObject, err := t.readObject(ctx, item) @@ -382,49 +529,55 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, return false, errors.Wrapf(err, "error reading object %s", item) } - orphan := false - if existingObject != nil { - deletePolicy, err := t.getDeletePolicy(existingObject) - if err != nil { - // note: this should not happen under normal circumstances, because we checked the annotation when persisting it - return false, errors.Wrapf(err, "error validating existing object %s", types.ObjectKeyToString(existingObject)) - } - orphan = deletePolicy == DeletePolicyOrphan - } + orphan := item.DeletePolicy == DeletePolicyOrphan switch item.Phase { case PhaseScheduledForDeletion: - if numManagedToBeDeleted == 0 || isManaged(status.Inventory, item) { + // delete namespaces after all contained inventory items + // delete all instances of managed types before remaining objects; this ensures that no objects are prematurely + // deleted which are needed for the deletion of the managed instances, such as webhook servers, api servers, ... 
+ if (!isNamespace(item) || !isNamespaceUsed(status.Inventory, item.Name)) && (numManagedToBeDeleted == 0 || isInstanceOfManagedType(status.Inventory, item)) { if orphan { - continue - } - // note: here is a theoretical risk that we delete an existing foreign object, because informers are not yet synced - // however not sending the delete request is also not an option, because this might lead to orphaned own dependents - if err := t.deleteObject(ctx, item, existingObject); err != nil { - return false, errors.Wrapf(err, "error deleting object %s", item) + item.Phase = "" + } else { + // note: here is a theoretical risk that we delete an existing foreign object, because informers are not yet synced + // however not sending the delete request is also not an option, because this might lead to orphaned own dependents + // TODO: perform an additional owner id check + if err := t.deleteObject(ctx, item, existingObject); err != nil { + return false, errors.Wrapf(err, "error deleting object %s", item) + } + item.Phase = PhaseDeleting + item.Status = kstatus.TerminatingStatus.String() + numToBeDeleted++ } - item.Phase = PhaseDeleting - item.Status = kstatus.TerminatingStatus.String() + } else { + numToBeDeleted++ } - numToBeDeleted++ case PhaseScheduledForCompletion: - if numManagedToBeDeleted == 0 || isManaged(status.Inventory, item) { + // delete namespaces after all contained inventory items + // delete all instances of managed types before remaining objects; this ensures that no objects are prematurely + // deleted which are needed for the deletion of the managed instances, such as webhook servers, api servers, ... 
+ if (!isNamespace(item) || !isNamespaceUsed(status.Inventory, item.Name)) && (numManagedToBeDeleted == 0 || isInstanceOfManagedType(status.Inventory, item)) { if orphan { - return false, fmt.Errorf("invalid usage of deletion policy: object %s is scheduled for completion (due to purge order) and therefore cannot be orphaned", item) - } - // note: here is a theoretical risk that we delete an existing foreign object, because informers are not yet synced - // however not sending the delete request is also not an option, because this might lead to orphaned own dependents - if err := t.deleteObject(ctx, item, existingObject); err != nil { - return false, errors.Wrapf(err, "error deleting object %s", item) + return false, fmt.Errorf("invalid usage of deletion policy: object %s is scheduled for completion and therefore cannot be orphaned", item) + } else { + // note: here is a theoretical risk that we delete an existing foreign object, because informers are not yet synced + // however not sending the delete request is also not an option, because this might lead to orphaned own dependents + // TODO: perform an additional owner id check + if err := t.deleteObject(ctx, item, existingObject); err != nil { + return false, errors.Wrapf(err, "error deleting object %s", item) + } + item.Phase = PhaseCompleting + item.Status = kstatus.TerminatingStatus.String() + numToBeDeleted++ } - item.Phase = PhaseCompleting - item.Status = kstatus.TerminatingStatus.String() + } else { + numToBeDeleted++ } - numToBeDeleted++ case PhaseDeleting: // if object is gone, we can remove it from inventory if existingObject == nil { - continue + item.Phase = "" } else { numToBeDeleted++ } @@ -441,10 +594,17 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, panic("this cannot happen") } } - inventory = append(inventory, item) + + // trigger another reconcile if this is the last object of the wave, and some deletions are not yet completed + if k == len(status.Inventory)-1 
|| status.Inventory[k+1].DeleteOrder > item.DeleteOrder { + log.V(2).Info("end of deletion wave", "order", item.DeleteOrder) + if numToBeDeleted > 0 { + break + } + } } - status.Inventory = inventory + status.Inventory = slices.Select(status.Inventory, func(item *InventoryItem) bool { return item.Phase != "" }) // trigger another reconcile if numToBeDeleted > 0 { @@ -469,26 +629,31 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } // put objects into right order for applying - objects = sortObjectsForApply(objects, getOrder) + objects = sortObjectsForApply(objects, getApplyOrder) - // apply objects and maintain inventory - numUnready := 0 + // apply objects and maintain inventory; + // objects are applied (i.e. created/updated) in waves according to their apply order; + // that means, only if all objects of a wave are ready or completed, the next wave + // will be processed; within each wave, objects which are instances of managed types + // will be applied after all other objects numNotManagedToBeApplied := 0 + numUnready := 0 for k, object := range objects { - // retrieve object order - order := getOrder(object) - // retrieve inventory item corresponding to this object item := mustGetItem(status.Inventory, object) + // retrieve object order + applyOrder := getApplyOrder(object) + // if this is the first object of an order, then // count instances of managed types in this order which are about to be applied - if k == 0 || getOrder(objects[k-1]) < order { + if k == 0 || getApplyOrder(objects[k-1]) < applyOrder { + log.V(2).Info("begin of apply wave", "order", applyOrder) numNotManagedToBeApplied = 0 - for j := k; j < len(objects) && getOrder(objects[j]) == order; j++ { + for j := k; j < len(objects) && getApplyOrder(objects[j]) == applyOrder; j++ { _object := objects[j] _item := mustGetItem(status.Inventory, _object) - if _item.Phase != PhaseReady && _item.Phase != PhaseCompleted && !isManaged(status.Inventory, _object) { + if 
_item.Phase != PhaseReady && _item.Phase != PhaseCompleted && !isInstanceOfManagedType(status.Inventory, _object) { // that means: _item.Phase is one of PhaseScheduledForApplication, PhaseCreating, PhaseUpdating numNotManagedToBeApplied++ } @@ -497,7 +662,10 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, // for non-completed objects, compute and update status, and apply (create or update) the object if necessary if item.Phase != PhaseCompleted { - if numNotManagedToBeApplied == 0 || !isManaged(status.Inventory, object) { + // reconcile all instances of managed types after remaining objects + // this ensures that everything is running what is needed for the reconciliation of the managed instances, + // such as webhook servers, api servers, ... + if numNotManagedToBeApplied == 0 || !isInstanceOfManagedType(status.Inventory, object) { // fetch object (if existing) existingObject, err := t.readObject(ctx, item) if err != nil { @@ -516,14 +684,15 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, item.Status = kstatus.InProgressStatus.String() numUnready++ } else if existingObject.GetDeletionTimestamp().IsZero() && existingObject.GetAnnotations()[t.annotationKeyDigest] != item.Digest { - // note: this must() is ok because we checked the generated objects above - updatePolicy := must(t.getUpdatePolicy(object)) + updatePolicy := getUpdatePolicy(object) switch updatePolicy { case UpdatePolicyRecreate: + // TODO: perform an additional owner id check if err := t.deleteObject(ctx, object, existingObject); err != nil { return false, errors.Wrapf(err, "error deleting (while recreating) object %s", item) } default: + // TODO: perform an additional owner id check if err := t.updateObject(ctx, object, existingObject, nil, updatePolicy); err != nil { return false, errors.Wrapf(err, "error updating object %s", item) } @@ -532,16 +701,16 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) 
(bool, item.Status = kstatus.InProgressStatus.String() numUnready++ } else { - res, err := computeStatus(existingObject) + status, err := t.statusAnalyzer.ComputeStatus(existingObject) if err != nil { return false, errors.Wrapf(err, "error checking status of object %s", item) } - if existingObject.GetDeletionTimestamp().IsZero() && res.Status == kstatus.CurrentStatus { + if existingObject.GetDeletionTimestamp().IsZero() && status == kstatus.CurrentStatus { item.Phase = PhaseReady } else { numUnready++ } - item.Status = res.Status.String() + item.Status = status.String() } } else { numUnready++ @@ -553,14 +722,15 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, // if this is the last object of an order, then // - if everything so far is ready, trigger due completions and trigger another reconcile if any completion was triggered // - otherwise trigger another reconcile - if k == len(objects)-1 || getOrder(objects[k+1]) > order { + if k == len(objects)-1 || getApplyOrder(objects[k+1]) > applyOrder { + log.V(2).Info("end of apply wave", "order", applyOrder) if numUnready == 0 { numPurged := 0 for j := 0; j <= k; j++ { _object := objects[j] _item := mustGetItem(status.Inventory, _object) _purgeOrder := getPurgeOrder(_object) - if (k == len(objects)-1 && _purgeOrder < math.MaxInt || _purgeOrder <= order) && _item.Phase != PhaseCompleted { + if (k == len(objects)-1 && _purgeOrder <= maxOrder || _purgeOrder <= applyOrder) && _item.Phase != PhaseCompleted { _item.Phase = PhaseScheduledForCompletion numPurged++ } @@ -578,55 +748,81 @@ func (t *reconcileTarget[T]) Reconcile(ctx context.Context, component T) (bool, } func (t *reconcileTarget[T]) Delete(ctx context.Context, component T) (bool, error) { + log := log.FromContext(ctx) status := component.GetStatus() - // count instances of managed types - numManaged := 0 - for _, item := range status.Inventory { - if isManaged(status.Inventory, item) { - numManaged++ + // delete objects and 
maintain inventory; + // objects are deleted in waves according to their delete order; + // that means, only if all objects of a wave are gone, the next wave will be processed; + // within each wave, objects which are instances of managed types are deleted before all + // other objects, and namespaces will only be deleted if they are not used by any + // object in the inventory (note that this may cause deadlocks) + numManagedToBeDeleted := 0 + numToBeDeleted := 0 + for k, item := range status.Inventory { + // if this is the first object of an order, then + // count instances of managed types in this wave which are about to be deleted + if k == 0 || status.Inventory[k-1].DeleteOrder < item.DeleteOrder { + log.V(2).Info("begin of deletion wave", "order", item.DeleteOrder) + numManagedToBeDeleted = 0 + for j := k; j < len(status.Inventory) && status.Inventory[j].DeleteOrder == item.DeleteOrder; j++ { + _item := status.Inventory[j] + if isInstanceOfManagedType(status.Inventory, _item) { + numManagedToBeDeleted++ + } + } } - } - // delete objects and maintain inventory - // TODO: delete-order - var inventory []*InventoryItem - for _, item := range status.Inventory { // fetch object (if existing) existingObject, err := t.readObject(ctx, item) if err != nil { return false, errors.Wrapf(err, "error reading object %s", item) } - // if object is gone, we can remove it from inventory - if existingObject == nil && item.Phase == PhaseDeleting { - continue - } + orphan := item.DeletePolicy == DeletePolicyOrphan - if numManaged == 0 || isManaged(status.Inventory, item) { - // orphan the object, if according deletion policy is set - if existingObject != nil { - deletePolicy, err := t.getDeletePolicy(existingObject) - if err != nil { - // note: this should not happen under normal circumstances, because we checked the annotation when persisting it - return false, errors.Wrapf(err, "error validating existing object %s", types.ObjectKeyToString(existingObject)) - } - if deletePolicy 
== DeletePolicyOrphan { - continue + switch item.Phase { + case PhaseDeleting: + // if object is gone, we can remove it from inventory + if existingObject == nil { + item.Phase = "" + } else { + numToBeDeleted++ + } + default: + // delete namespaces after all contained inventory items + // delete all instances of managed types before remaining objects; this ensures that no objects are prematurely + // deleted which are needed for the deletion of the managed instances, such as webhook servers, api servers, ... + if (!isNamespace(item) || !isNamespaceUsed(status.Inventory, item.Name)) && (numManagedToBeDeleted == 0 || isInstanceOfManagedType(status.Inventory, item)) { + if orphan { + item.Phase = "" + } else { + // delete the object + // note: here is a theoretical risk that we delete an existing (foreign) object, because informers are not yet synced + // however not sending the delete request is also not an option, because this might lead to orphaned own dependents + // TODO: perform an additional owner id check + if err := t.deleteObject(ctx, item, existingObject); err != nil { + return false, errors.Wrapf(err, "error deleting object %s", item) + } + item.Phase = PhaseDeleting + item.Status = kstatus.TerminatingStatus.String() + numToBeDeleted++ } + } else { + numToBeDeleted++ } - // delete the object - // note: here is a theoretical risk that we delete an existing (foreign) object, because informers are not yet synced - // however not sending the delete request is also not an option, because this might lead to orphaned own dependents - if err := t.deleteObject(ctx, item, existingObject); err != nil { - return false, errors.Wrapf(err, "error deleting object %s", item) + } + + // trigger another reconcile if this is the last object of the wave, and some deletions are not yet completed + if k == len(status.Inventory)-1 || status.Inventory[k+1].DeleteOrder > item.DeleteOrder { + log.V(2).Info("end of deletion wave", "order", item.DeleteOrder) + if numToBeDeleted > 0 { 
+ break } - item.Phase = PhaseDeleting - item.Status = kstatus.TerminatingStatus.String() } - inventory = append(inventory, item) } - status.Inventory = inventory + + status.Inventory = slices.Select(status.Inventory, func(item *InventoryItem) bool { return item.Phase != "" }) return len(status.Inventory) == 0, nil } @@ -752,7 +948,6 @@ func (t *reconcileTarget[T]) updateObject(ctx context.Context, object client.Obj // because replace will only claim fields which are new or which have changed; the field owner of declared (but unmodified) // fields will not be touched object.SetManagedFields(nil) - // note: this must() is ok because we checked the generated objects before switch updatePolicy { case UpdatePolicySsaMerge: return t.client.Patch(ctx, object, client.Apply, client.FieldOwner(t.reconcilerName), client.ForceOwnership) @@ -841,7 +1036,7 @@ func (t *reconcileTarget[T]) deleteObject(ctx context.Context, key types.ObjectK return fmt.Errorf("error deleting custom resource definition %s, existing instances found", types.ObjectKeyToString(key)) } if ok := controllerutil.RemoveFinalizer(crd, t.reconcilerName); ok { - // note: 409 error is very likely here (because of concurrent updates happening through the API server); this is why we retry once + // note: 409 error is very likely here (because of concurrent updates happening through the api server); this is why we retry once if err := t.client.Update(ctx, crd, client.FieldOwner(t.reconcilerName)); err != nil { if i == 1 && apierrors.IsConflict(err) { log.V(1).Info("error while updating CustomResourcedefinition (409 conflict); doing one retry", "name", t.reconcilerName, "error", err.Error()) @@ -866,7 +1061,7 @@ func (t *reconcileTarget[T]) deleteObject(ctx context.Context, key types.ObjectK return fmt.Errorf("error deleting api service %s, existing instances found", types.ObjectKeyToString(key)) } if ok := controllerutil.RemoveFinalizer(apiService, t.reconcilerName); ok { - // note: 409 error is very likely here 
(because of concurrent updates happening through the API server); this is why we retry once + // note: 409 error is very likely here (because of concurrent updates happening through the api server); this is why we retry once if err := t.client.Update(ctx, apiService, client.FieldOwner(t.reconcilerName)); err != nil { if i == 1 && apierrors.IsConflict(err) { log.V(1).Info("error while updating APIService (409 conflict); doing one retry", "name", t.reconcilerName, "error", err.Error()) @@ -882,7 +1077,7 @@ func (t *reconcileTarget[T]) deleteObject(ctx context.Context, key types.ObjectK } func (t *reconcileTarget[T]) getAdoptionPolicy(object client.Object) (AdoptionPolicy, error) { - adoptionPolicy := object.GetAnnotations()[t.annotationKeyAdoptionPolicy] + adoptionPolicy := strcase.ToKebab(object.GetAnnotations()[t.annotationKeyAdoptionPolicy]) switch adoptionPolicy { case "": return t.adoptionPolicy, nil @@ -894,7 +1089,7 @@ func (t *reconcileTarget[T]) getAdoptionPolicy(object client.Object) (AdoptionPo } func (t *reconcileTarget[T]) getReconcilePolicy(object client.Object) (ReconcilePolicy, error) { - reconcilePolicy := object.GetAnnotations()[t.annotationKeyReconcilePolicy] + reconcilePolicy := strcase.ToKebab(object.GetAnnotations()[t.annotationKeyReconcilePolicy]) switch reconcilePolicy { case "": return t.reconcilePolicy, nil @@ -906,7 +1101,7 @@ func (t *reconcileTarget[T]) getReconcilePolicy(object client.Object) (Reconcile } func (t *reconcileTarget[T]) getUpdatePolicy(object client.Object) (UpdatePolicy, error) { - updatePolicy := object.GetAnnotations()[t.annotationKeyUpdatePolicy] + updatePolicy := strcase.ToKebab(object.GetAnnotations()[t.annotationKeyUpdatePolicy]) switch updatePolicy { case "", types.UpdatePolicyDefault: return t.updatePolicy, nil @@ -918,7 +1113,7 @@ func (t *reconcileTarget[T]) getUpdatePolicy(object client.Object) (UpdatePolicy } func (t *reconcileTarget[T]) getDeletePolicy(object client.Object) (DeletePolicy, error) { - 
deletePolicy := object.GetAnnotations()[t.annotationKeyDeletePolicy] + deletePolicy := strcase.ToKebab(object.GetAnnotations()[t.annotationKeyDeletePolicy]) switch deletePolicy { case "", types.DeletePolicyDefault: return t.deletePolicy, nil @@ -929,36 +1124,51 @@ func (t *reconcileTarget[T]) getDeletePolicy(object client.Object) (DeletePolicy } } -func (t *reconcileTarget[T]) getOrder(object client.Object) (int, error) { - value, ok := object.GetAnnotations()[t.annotationKeyOrder] +func (t *reconcileTarget[T]) getApplyOrder(object client.Object) (int, error) { + value, ok := object.GetAnnotations()[t.annotationKeyApplyOrder] if !ok { return 0, nil } - order, err := strconv.Atoi(value) + applyOrder, err := strconv.Atoi(value) if err != nil { - return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyOrder, value) + return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyApplyOrder, value) } - if err := checkRange(order, math.MinInt16, math.MaxInt16); err != nil { - return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyOrder, value) + if err := checkRange(applyOrder, minOrder, maxOrder); err != nil { + return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyApplyOrder, value) } - return order, nil + return applyOrder, nil } func (t *reconcileTarget[T]) getPurgeOrder(object client.Object) (int, error) { value, ok := object.GetAnnotations()[t.annotationKeyPurgeOrder] if !ok { - return math.MaxInt, nil + return maxOrder + 1, nil } purgeOrder, err := strconv.Atoi(value) if err != nil { return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyPurgeOrder, value) } - if err := checkRange(purgeOrder, math.MinInt16, math.MaxInt16); err != nil { + if err := checkRange(purgeOrder, minOrder, maxOrder); err != nil { return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyPurgeOrder, value) } return purgeOrder, nil } +func (t 
*reconcileTarget[T]) getDeleteOrder(object client.Object) (int, error) { + value, ok := object.GetAnnotations()[t.annotationKeyDeleteOrder] + if !ok { + return 0, nil + } + deleteOrder, err := strconv.Atoi(value) + if err != nil { + return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyDeleteOrder, value) + } + if err := checkRange(deleteOrder, minOrder, maxOrder); err != nil { + return 0, errors.Wrapf(err, "invalid value for annotation %s: %s", t.annotationKeyDeleteOrder, value) + } + return deleteOrder, nil +} + func (t *reconcileTarget[T]) isCrdUsed(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, onlyForeign bool) (bool, error) { gvk := schema.GroupVersionKind{ Group: crd.Spec.Group, @@ -980,7 +1190,8 @@ func (t *reconcileTarget[T]) isCrdUsed(ctx context.Context, crd *apiextensionsv1 hashedOwnerId = crd.Labels[t.labelKeyOwnerId] legacyOwnerId = strings.Join(slices.Last(strings.Split(crd.Annotations[t.annotationKeyOwnerId], "/"), 2), "_") } - labelSelector = mustParseLabelSelector(t.labelKeyOwnerId + " notin (" + hashedOwnerId + "," + legacyOwnerId + ")") + // note: this must() is ok because the label selector string is static, and correct + labelSelector = must(labels.Parse(t.labelKeyOwnerId + " notin (" + hashedOwnerId + "," + legacyOwnerId + ")")) // labelSelector = mustParseLabelSelector(t.labelKeyOwnerId + "!=" + crd.Labels[t.labelKeyOwnerId]) } if err := t.client.List(ctx, list, &client.ListOptions{LabelSelector: labelSelector, Limit: 1}); err != nil { @@ -1013,7 +1224,8 @@ func (t *reconcileTarget[T]) isApiServiceUsed(ctx context.Context, apiService *a hashedOwnerId = apiService.Labels[t.labelKeyOwnerId] legacyOwnerId = strings.Join(slices.Last(strings.Split(apiService.Annotations[t.annotationKeyOwnerId], "/"), 2), "_") } - labelSelector = mustParseLabelSelector(t.labelKeyOwnerId + " notin (" + hashedOwnerId + "," + legacyOwnerId + ")") + // note: this must() is ok because the label selector string is static, 
and correct + labelSelector = must(labels.Parse(t.labelKeyOwnerId + " notin (" + hashedOwnerId + "," + legacyOwnerId + ")")) // labelSelector = mustParseLabelSelector(t.labelKeyOwnerId + "!=" + crd.Labels[t.labelKeyOwnerId]) } for _, kind := range kinds { diff --git a/pkg/component/types.go b/pkg/component/types.go index e6fb958..10a8a05 100644 --- a/pkg/component/types.go +++ b/pkg/component/types.go @@ -227,6 +227,56 @@ type NameInfo struct { Name string `json:"name"` } +// AdoptionPolicy defines how the reconciler reacts if a dependent object exists but has no or a different owner. +type AdoptionPolicy string + +const ( + // Fail if the dependent object exists but has no or a different owner. + AdoptionPolicyNever AdoptionPolicy = "Never" + // Adopt existing dependent objects if they have no owner set. + AdoptionPolicyIfUnowned AdoptionPolicy = "IfUnowned" + // Adopt existing dependent objects, even if they have a conflicting owner. + AdoptionPolicyAlways AdoptionPolicy = "Always" +) + +// ReconcilePolicy defines when the reconciler will reconcile the dependent object. +type ReconcilePolicy string + +const ( + // Reconcile the dependent object if its manifest, as produced by the generator, changes. + ReconcilePolicyOnObjectChange ReconcilePolicy = "OnObjectChange" + // Reconcile the dependent object if its manifest, as produced by the generator, changes, or if the owning + // component changes (identified by a change of its metadata.generation). + ReconcilePolicyOnObjectOrComponentChange ReconcilePolicy = "OnObjectOrComponentChange" + // Reconcile the dependent object only once; afterwards it will never be touched again by the reconciler. + ReconcilePolicyOnce ReconcilePolicy = "Once" +) + +// UpdatePolicy defines how the reconciler will update dependent objects. +type UpdatePolicy string + +const ( + // Recreate (that is: delete and create) existing dependent objects. + UpdatePolicyRecreate UpdatePolicy = "Recreate" + // Replace existing dependent objects. 
+ UpdatePolicyReplace UpdatePolicy = "Replace" + // Use server side apply to update existing dependents. + UpdatePolicySsaMerge UpdatePolicy = "SsaMerge" + // Use server side apply to update existing dependents and, in addition, reclaim fields owned by certain + // field owners, such as kubectl or helm. + UpdatePolicySsaOverride UpdatePolicy = "SsaOverride" +) + +// DeletePolicy defines how the reconciler will delete dependent objects. +type DeletePolicy string + +const ( + // Delete dependent objects. + DeletePolicyDelete DeletePolicy = "Delete" + // Orphan dependent objects. + DeletePolicyOrphan DeletePolicy = "Orphan" +) + // +kubebuilder:object:generate=true // InventoryItem represents a dependent object managed by this operator. @@ -235,7 +285,19 @@ type InventoryItem struct { TypeInfo `json:",inline"` // Namespace and name of the dependent object. NameInfo `json:",inline"` - // Managed types + // Adoption policy. + AdoptionPolicy AdoptionPolicy `json:"adoptionPolicy"` + // Reconcile policy. + ReconcilePolicy ReconcilePolicy `json:"reconcilePolicy"` + // Update policy. + UpdatePolicy UpdatePolicy `json:"updatePolicy"` + // Delete policy. + DeletePolicy DeletePolicy `json:"deletePolicy"` + // Apply order. + ApplyOrder int `json:"applyOrder"` + // Delete order. + DeleteOrder int `json:"deleteOrder"` + // Managed types. ManagedTypes []TypeInfo `json:"managedTypes,omitempty"` // Digest of the descriptor of the dependent object. 
Digest string `json:"digest"` diff --git a/pkg/component/util.go b/pkg/component/util.go index 86a2804..a540ac6 100644 --- a/pkg/component/util.go +++ b/pkg/component/util.go @@ -18,7 +18,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -245,7 +244,7 @@ func sortObjectsForApply[T client.Object](s []T, orderFunc func(client.Object) i return slices.SortBy(s, f) } -func sortObjectsForDelete[T types.ObjectKey](s []T) []T { +func sortObjectsForDelete(inventory []*InventoryItem) []*InventoryItem { priority := map[string]int{ "CustomResourceDefinition.apiextensions.k8s.io": -1, "APIService.apiregistration.k8s.io": -1, @@ -260,12 +259,14 @@ func sortObjectsForDelete[T types.ObjectKey](s []T) []T { "PriorityClass.scheduling.k8s.io": 4, "StorageClass.storage.k8s.io": 4, } - f := func(x T, y T) bool { - gvx := x.GetObjectKind().GroupVersionKind().GroupKind().String() - gvy := y.GetObjectKind().GroupVersionKind().GroupKind().String() - return priority[gvx] > priority[gvy] + f := func(x *InventoryItem, y *InventoryItem) bool { + orderx := x.DeleteOrder + ordery := y.DeleteOrder + gvx := x.GroupVersionKind().GroupKind().String() + gvy := y.GroupVersionKind().GroupKind().String() + return orderx > ordery || orderx == ordery && priority[gvx] > priority[gvy] } - return slices.SortBy(s, f) + return slices.SortBy(inventory, f) } func getItem(inventory []*InventoryItem, key types.ObjectKey) *InventoryItem { @@ -291,22 +292,32 @@ func mustGetItem(inventory []*InventoryItem, key types.ObjectKey) *InventoryItem return item } -func isManaged(inventory []*InventoryItem, key types.TypeKey) bool { - gvk := key.GetObjectKind().GroupVersionKind() +func isNamespaceUsed(inventory []*InventoryItem, namespace string) bool { + // 
TODO: do not consider inventory items with certain Phases (e.g. Completed)? for _, item := range inventory { - for _, t := range item.ManagedTypes { - if (t.Group == "*" || t.Group == gvk.Group) && (t.Version == "*" || t.Version == gvk.Version) && (t.Kind == "*" || t.Kind == gvk.Kind) { - return true - } + if item.Namespace == namespace { + return true } } return false } -func mustParseLabelSelector(s string) labels.Selector { - selector, err := labels.Parse(s) - if err != nil { - panic("this cannot happen") +func isInstanceOfManagedType(inventory []*InventoryItem, key types.TypeKey) bool { + // TODO: do not consider inventory items with certain Phases (e.g. Completed)? + for _, item := range inventory { + if isManaged := isManagedBy(item, key); isManaged { + return true + } } - return selector + return false +} + +func isManagedBy(item *InventoryItem, key types.TypeKey) bool { + gvk := key.GetObjectKind().GroupVersionKind() + for _, t := range item.ManagedTypes { + if (t.Group == "*" || t.Group == gvk.Group) && (t.Version == "*" || t.Version == gvk.Version) && (t.Kind == "*" || t.Kind == gvk.Kind) { + return true + } + } + return false } diff --git a/pkg/component/zz_generated.deepcopy.go b/pkg/component/zz_generated.deepcopy.go index cb83782..1238587 100644 --- a/pkg/component/zz_generated.deepcopy.go +++ b/pkg/component/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and component-operator-runtime contributors @@ -381,7 +380,8 @@ func (in *SecretReference) DeepCopyInto(out *SecretReference) { if val == nil { (*out)[key] = nil } else { - in, out := &val, &outVal + inVal := (*in)[key] + in, out := &inVal, &outVal *out = make([]byte, len(*in)) copy(*out, *in) } diff --git a/pkg/manifests/helm/generator.go b/pkg/manifests/helm/generator.go index 8dc4c20..3c8af2c 100644 --- a/pkg/manifests/helm/generator.go +++ 
b/pkg/manifests/helm/generator.go @@ -230,7 +230,7 @@ func (g *HelmGenerator) Generate(ctx context.Context, namespace string, name str annotationKeyReconcilePolicy := reconcilerName + "/" + types.AnnotationKeySuffixReconcilePolicy annotationKeyUpdatePolicy := reconcilerName + "/" + types.AnnotationKeySuffixUpdatePolicy - annotationKeyOrder := reconcilerName + "/" + types.AnnotationKeySuffixOrder + annotationKeyApplyOrder := reconcilerName + "/" + types.AnnotationKeySuffixApplyOrder annotationKeyPurgeOrder := reconcilerName + "/" + types.AnnotationKeySuffixPurgeOrder data := make(map[string]any) @@ -346,31 +346,31 @@ func (g *HelmGenerator) Generate(ctx context.Context, namespace string, name str switch { case slices.Equal(slices.Sort(hookMetadata.Types), slices.Sort([]string{helm.HookTypePreInstall})): annotations[annotationKeyReconcilePolicy] = types.ReconcilePolicyOnce - annotations[annotationKeyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) + annotations[annotationKeyApplyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) if slices.Contains(hookMetadata.DeletePolicies, helm.HookDeletePolicyHookSucceeded) { annotations[annotationKeyPurgeOrder] = strconv.Itoa(-1) } case slices.Equal(slices.Sort(hookMetadata.Types), slices.Sort([]string{helm.HookTypePostInstall})): annotations[annotationKeyReconcilePolicy] = types.ReconcilePolicyOnce - annotations[annotationKeyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMinWeight + 1) + annotations[annotationKeyApplyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMinWeight + 1) if slices.Contains(hookMetadata.DeletePolicies, helm.HookDeletePolicyHookSucceeded) { annotations[annotationKeyPurgeOrder] = strconv.Itoa(helm.HookMaxWeight - helm.HookMinWeight + 1) } case slices.Equal(slices.Sort(hookMetadata.Types), slices.Sort([]string{helm.HookTypePreInstall, helm.HookTypePreUpgrade})): annotations[annotationKeyReconcilePolicy] = types.ReconcilePolicyOnObjectOrComponentChange - 
annotations[annotationKeyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) + annotations[annotationKeyApplyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) if slices.Contains(hookMetadata.DeletePolicies, helm.HookDeletePolicyHookSucceeded) { annotations[annotationKeyPurgeOrder] = strconv.Itoa(-1) } case slices.Equal(slices.Sort(hookMetadata.Types), slices.Sort([]string{helm.HookTypePostInstall, helm.HookTypePostUpgrade})): annotations[annotationKeyReconcilePolicy] = types.ReconcilePolicyOnObjectOrComponentChange - annotations[annotationKeyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMinWeight + 1) + annotations[annotationKeyApplyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMinWeight + 1) if slices.Contains(hookMetadata.DeletePolicies, helm.HookDeletePolicyHookSucceeded) { annotations[annotationKeyPurgeOrder] = strconv.Itoa(helm.HookMaxWeight - helm.HookMinWeight + 1) } case slices.Equal(slices.Sort(hookMetadata.Types), slices.Sort([]string{helm.HookTypePreInstall, helm.HookTypePreUpgrade, helm.HookTypePostInstall, helm.HookTypePostUpgrade})): annotations[annotationKeyReconcilePolicy] = types.ReconcilePolicyOnObjectOrComponentChange - annotations[annotationKeyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) + annotations[annotationKeyApplyOrder] = strconv.Itoa(hookMetadata.Weight - helm.HookMaxWeight - 1) if slices.Contains(hookMetadata.DeletePolicies, helm.HookDeletePolicyHookSucceeded) { annotations[annotationKeyPurgeOrder] = strconv.Itoa(helm.HookMaxWeight - helm.HookMinWeight + 1) } diff --git a/pkg/types/constants.go b/pkg/types/constants.go index 446094d..7aa99d5 100644 --- a/pkg/types/constants.go +++ b/pkg/types/constants.go @@ -7,14 +7,16 @@ package types const ( LabelKeySuffixOwnerId = "owner-id" + AnnotationKeySuffixOwnerId = "owner-id" AnnotationKeySuffixDigest = "digest" AnnotationKeySuffixAdoptionPolicy = "adoption-policy" AnnotationKeySuffixReconcilePolicy = "reconcile-policy" 
AnnotationKeySuffixUpdatePolicy = "update-policy" AnnotationKeySuffixDeletePolicy = "delete-policy" - AnnotationKeySuffixOrder = "order" + AnnotationKeySuffixApplyOrder = "apply-order" AnnotationKeySuffixPurgeOrder = "purge-order" - AnnotationKeySuffixOwnerId = "owner-id" + AnnotationKeySuffixDeleteOrder = "delete-order" + AnnotationKeySuffixStatusHint = "status-hint" ) const ( @@ -42,3 +44,8 @@ const ( DeletePolicyDelete = "delete" DeletePolicyOrphan = "orphan" ) + +const ( + StatusHintHasObservedGeneration = "has-observed-generation" + StatusHintHasReadyCondition = "has-ready-condition" +) diff --git a/website/content/en/docs/concepts/dependents.md b/website/content/en/docs/concepts/dependents.md index 2d6d277..694fce4 100644 --- a/website/content/en/docs/concepts/dependents.md +++ b/website/content/en/docs/concepts/dependents.md @@ -20,7 +20,7 @@ instances of the managed custom resource definition in the cluster. In some special situations, it is desirable to have even more control on the lifecycle of the dependent objects. 
To support such cases, the `Generator` implementation can set the following annotations in the manifests of the dependents: - `mycomponent-operator.mydomain.io/adoption-policy`: defines how the reconciler reacts if the object exists but has no or a different owner; can be one of: - - `never`: fail if the object exists but has no or a different owner + - `never`: fail if the object exists and has no or a different owner - `if-unowned` (which is the default): adopt the object if it has no owner set - `always`: adopt the object, even if it has a conflicting owner - `mycomponent-operator.mydomain.io/reconcile-policy`: defines how the object is reconciled; can be one of: @@ -37,8 +37,12 @@ To support such cases, the `Generator` implementation can set the following anno - `default` (deprecated): equivalent to the annotation being unset (which means that the reconciler default will be used) - `delete` (which is the default): a delete call will be sent to the Kubernetes API server - `orphan`: the object will not be deleted, and it will be no longer tracked -- `mycomponent-operator.mydomain.io/order`: the order at which this object will be reconciled; dependents will be reconciled order by order; that is, objects of the same order will be deployed in the canonical order, and the controller will only proceed to the next order if all objects of previous orders are ready; specified orders can be negative or positive numbers between -32768 and 32767, objects with no explicit order set are treated as if they would specify order 0. 
-- `mycomponent-operator.mydomain.io/purge-order` (optional): the order by which this object will be purged +- `mycomponent-operator.mydomain.io/apply-order`: the wave in which this object will be reconciled; dependents will be reconciled wave by wave; that is, objects of the same wave will be deployed in a canonical order, and the reconciler will only proceed to the next wave if all objects of previous waves are ready; specified orders can be negative or positive numbers between -32768 and 32767, objects with no explicit order set are treated as if they would specify order 0 +- `mycomponent-operator.mydomain.io/purge-order` (optional): the wave by which this object will be purged; here, purged means that, while applying the dependents, the object will be deleted from the cluster at the end of the specified wave; the corresponding record in `status.Inventory` will be set to phase `Completed`; setting purge orders is useful to spawn ad-hoc objects during the reconciliation, which are not permanently needed; so it's comparable to helm hooks, in a certain sense +- `mycomponent-operator.mydomain.io/delete-order` (optional): the wave by which this object will be deleted; that is, if the dependent is no longer part of the component, or if the whole component is being deleted; dependents will be deleted wave by wave; that is, objects of the same wave will be deleted in a canonical order, and the reconciler will only proceed to the next wave if all objects of previous waves are gone; specified orders can be negative or positive numbers between -32768 and 32767, objects with no explicit order set are treated as if they would specify order 0; note that the delete order is completely independent of the apply order +- `mycomponent-operator.mydomain.io/status-hint` (optional): a comma-separated list of hints that may help the framework to properly identify the state of the annotated dependent object; currently, the following hints are possible: - `has-observed-generation`: tells
the framework that the dependent object has a `status.observedGeneration` field, even if it is not (yet) set by the responsible controller (some controllers are known to set the observed generation lazily, with the consequence that there is a period right after creation of the dependent object, where the field is missing in the dependent's status) + - `has-ready-condition`: tells the framework to expect a ready condition; if it is absent, the condition state will be considered `Unknown` Note that, in the above paragraph, `mycomponent-operator.mydomain.io` has to be replaced with whatever was passed as `name` when calling `NewReconciler()`.