From 06026ca3c36be9785398e0d1efbe60f062b611e4 Mon Sep 17 00:00:00 2001 From: Michal Pryc Date: Thu, 14 Nov 2024 10:35:05 +0100 Subject: [PATCH 1/4] Generated scaffold for the NonAdminBackupStorageLocation The scaffold was generated using operator-sdk version 1.37.0: $ operator-sdk create api --version v1alpha1 --kind NonAdminBackupStorageLocation --group oadp Signed-off-by: Michal Pryc --- PROJECT | 9 ++ .../nonadminbackupstoragelocation_types.go | 64 +++++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 89 +++++++++++++++++++ cmd/main.go | 9 ++ config/crd/kustomization.yaml | 2 + config/rbac/kustomization.yaml | 6 ++ ...dminbackupstoragelocation_editor_role.yaml | 27 ++++++ ...dminbackupstoragelocation_viewer_role.yaml | 23 +++++ config/samples/kustomization.yaml | 1 + ...1alpha1_nonadminbackupstoragelocation.yaml | 9 ++ ...onadminbackupstoragelocation_controller.go | 62 +++++++++++++ ...inbackupstoragelocation_controller_test.go | 84 +++++++++++++++++ internal/controller/suite_test.go | 3 + 13 files changed, 388 insertions(+) create mode 100644 api/v1alpha1/nonadminbackupstoragelocation_types.go create mode 100644 config/rbac/nonadminbackupstoragelocation_editor_role.yaml create mode 100644 config/rbac/nonadminbackupstoragelocation_viewer_role.yaml create mode 100644 config/samples/oadp_v1alpha1_nonadminbackupstoragelocation.yaml create mode 100644 internal/controller/nonadminbackupstoragelocation_controller.go create mode 100644 internal/controller/nonadminbackupstoragelocation_controller_test.go diff --git a/PROJECT b/PROJECT index c0925ec..1c986ca 100644 --- a/PROJECT +++ b/PROJECT @@ -17,4 +17,13 @@ resources: kind: NonAdminBackup path: github.com/migtools/oadp-non-admin/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openshift.io + group: oadp + kind: NonAdminBackupStorageLocation + path: github.com/migtools/oadp-non-admin/api/v1alpha1 + version: v1alpha1 version: "3" diff --git 
a/api/v1alpha1/nonadminbackupstoragelocation_types.go b/api/v1alpha1/nonadminbackupstoragelocation_types.go new file mode 100644 index 0000000..ddd33fa --- /dev/null +++ b/api/v1alpha1/nonadminbackupstoragelocation_types.go @@ -0,0 +1,64 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NonAdminBackupStorageLocationSpec defines the desired state of NonAdminBackupStorageLocation +type NonAdminBackupStorageLocationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NonAdminBackupStorageLocation. 
Edit nonadminbackupstoragelocation_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NonAdminBackupStorageLocationStatus defines the observed state of NonAdminBackupStorageLocation +type NonAdminBackupStorageLocationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// NonAdminBackupStorageLocation is the Schema for the nonadminbackupstoragelocations API +type NonAdminBackupStorageLocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NonAdminBackupStorageLocationSpec `json:"spec,omitempty"` + Status NonAdminBackupStorageLocationStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// NonAdminBackupStorageLocationList contains a list of NonAdminBackupStorageLocation +type NonAdminBackupStorageLocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NonAdminBackupStorageLocation `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NonAdminBackupStorageLocation{}, &NonAdminBackupStorageLocationList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0efd366..62bbed8 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -137,6 +137,95 @@ func (in *NonAdminBackupStatus) DeepCopy() *NonAdminBackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonAdminBackupStorageLocation) DeepCopyInto(out *NonAdminBackupStorageLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocation. +func (in *NonAdminBackupStorageLocation) DeepCopy() *NonAdminBackupStorageLocation { + if in == nil { + return nil + } + out := new(NonAdminBackupStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NonAdminBackupStorageLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonAdminBackupStorageLocationList) DeepCopyInto(out *NonAdminBackupStorageLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NonAdminBackupStorageLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocationList. +func (in *NonAdminBackupStorageLocationList) DeepCopy() *NonAdminBackupStorageLocationList { + if in == nil { + return nil + } + out := new(NonAdminBackupStorageLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NonAdminBackupStorageLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NonAdminBackupStorageLocationSpec) DeepCopyInto(out *NonAdminBackupStorageLocationSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocationSpec. +func (in *NonAdminBackupStorageLocationSpec) DeepCopy() *NonAdminBackupStorageLocationSpec { + if in == nil { + return nil + } + out := new(NonAdminBackupStorageLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonAdminBackupStorageLocationStatus) DeepCopyInto(out *NonAdminBackupStorageLocationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocationStatus. +func (in *NonAdminBackupStorageLocationStatus) DeepCopy() *NonAdminBackupStorageLocationStatus { + if in == nil { + return nil + } + out := new(NonAdminBackupStorageLocationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VeleroBackup) DeepCopyInto(out *VeleroBackup) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index f364ca3..f2e8213 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" @@ -52,6 +53,7 @@ func init() { utilruntime.Must(nacv1alpha1.AddToScheme(scheme)) utilruntime.Must(velerov1.AddToScheme(scheme)) + utilruntime.Must(oadpv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -140,6 +142,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackup") os.Exit(1) } + if err = (&controller.NonAdminBackupStorageLocationReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackupStorageLocation") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0430ad8..14a0d98 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,6 +3,7 @@ # It should be run by config/default resources: - bases/oadp.openshift.io_nonadminbackups.yaml +- bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -14,6 +15,7 @@ patches: # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD #- path: patches/cainjection_in_nonadminbackups.yaml +#- path: patches/cainjection_in_nonadminbackupstoragelocations.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # [WEBHOOK] To enable webhook, uncomment the following section diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 2f853d7..3ae75ee 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -16,3 +16,9 @@ resources: # - auth_proxy_role.yaml # - auth_proxy_role_binding.yaml # - auth_proxy_client_clusterrole.yaml +# For each CRD, "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the Project itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- nonadminbackupstoragelocation_editor_role.yaml +- nonadminbackupstoragelocation_viewer_role.yaml diff --git a/config/rbac/nonadminbackupstoragelocation_editor_role.yaml b/config/rbac/nonadminbackupstoragelocation_editor_role.yaml new file mode 100644 index 0000000..f63cffe --- /dev/null +++ b/config/rbac/nonadminbackupstoragelocation_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit nonadminbackupstoragelocations. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: oadp-nac + app.kubernetes.io/managed-by: kustomize + name: nonadminbackupstoragelocation-editor-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations/status + verbs: + - get diff --git a/config/rbac/nonadminbackupstoragelocation_viewer_role.yaml b/config/rbac/nonadminbackupstoragelocation_viewer_role.yaml new file mode 100644 index 0000000..05a59fd --- /dev/null +++ b/config/rbac/nonadminbackupstoragelocation_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nonadminbackupstoragelocations. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: oadp-nac + app.kubernetes.io/managed-by: kustomize + name: nonadminbackupstoragelocation-viewer-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations + verbs: + - get + - list + - watch +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations/status + verbs: + - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 05ac669..4bbea82 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,5 @@ ## Append samples of your project ## resources: - oadp_v1alpha1_nonadminbackup.yaml +- oadp_v1alpha1_nonadminbackupstoragelocation.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/oadp_v1alpha1_nonadminbackupstoragelocation.yaml b/config/samples/oadp_v1alpha1_nonadminbackupstoragelocation.yaml new file mode 100644 index 0000000..2c6d5f7 --- /dev/null +++ b/config/samples/oadp_v1alpha1_nonadminbackupstoragelocation.yaml @@ -0,0 +1,9 @@ +apiVersion: oadp.openshift.io/v1alpha1 
+kind: NonAdminBackupStorageLocation +metadata: + labels: + app.kubernetes.io/name: oadp-nac + app.kubernetes.io/managed-by: kustomize + name: nonadminbackupstoragelocation-sample +spec: + # TODO(user): Add fields here diff --git a/internal/controller/nonadminbackupstoragelocation_controller.go b/internal/controller/nonadminbackupstoragelocation_controller.go new file mode 100644 index 0000000..154c0ef --- /dev/null +++ b/internal/controller/nonadminbackupstoragelocation_controller.go @@ -0,0 +1,62 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + oadpv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" +) + +// NonAdminBackupStorageLocationReconciler reconciles a NonAdminBackupStorageLocation object +type NonAdminBackupStorageLocationReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the NonAdminBackupStorageLocation object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.3/pkg/reconcile +func (r *NonAdminBackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NonAdminBackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&oadpv1alpha1.NonAdminBackupStorageLocation{}). 
+ Complete(r) +} diff --git a/internal/controller/nonadminbackupstoragelocation_controller_test.go b/internal/controller/nonadminbackupstoragelocation_controller_test.go new file mode 100644 index 0000000..3bcec3c --- /dev/null +++ b/internal/controller/nonadminbackupstoragelocation_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + oadpv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" +) + +var _ = Describe("NonAdminBackupStorageLocation Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + nonadminbackupstoragelocation := &oadpv1alpha1.NonAdminBackupStorageLocation{} + + BeforeEach(func() { + By("creating the custom resource for the Kind NonAdminBackupStorageLocation") + err := k8sClient.Get(ctx, typeNamespacedName, nonadminbackupstoragelocation) + if err != nil && errors.IsNotFound(err) { + resource := &oadpv1alpha1.NonAdminBackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", 
+ }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &oadpv1alpha1.NonAdminBackupStorageLocation{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance NonAdminBackupStorageLocation") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &NonAdminBackupStorageLocationReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 4f99475..20e154f 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -79,6 +79,9 @@ var _ = ginkgo.BeforeSuite(func() { err = velerov1.AddToScheme(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oadpv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) From d42f1b945ccbb2f82bceb237559822720216a351 Mon Sep 17 00:00:00 2001 From: Michal Pryc Date: Thu, 14 Nov 2024 10:35:58 +0100 Subject: [PATCH 2/4] Generated manifests after scaffold of NABSL Second step to generate scaffold folder for the NABSL controller with the $ make manifests command Used controller-gen-v0.14.0 for the above. 
Signed-off-by: Michal Pryc --- ...ift.io_nonadminbackupstoragelocations.yaml | 57 +++++++++++++++++++ config/rbac/role.yaml | 26 +++++++++ 2 files changed, 83 insertions(+) create mode 100644 config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml diff --git a/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml b/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml new file mode 100644 index 0000000..51181b2 --- /dev/null +++ b/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: nonadminbackupstoragelocations.oadp.openshift.io +spec: + group: oadp.openshift.io + names: + kind: NonAdminBackupStorageLocation + listKind: NonAdminBackupStorageLocationList + plural: nonadminbackupstoragelocations + singular: nonadminbackupstoragelocation + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NonAdminBackupStorageLocation is the Schema for the nonadminbackupstoragelocations + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NonAdminBackupStorageLocationSpec defines the desired state + of NonAdminBackupStorageLocation + properties: + foo: + description: Foo is an example field of NonAdminBackupStorageLocation. + Edit nonadminbackupstoragelocation_types.go to remove/update + type: string + type: object + status: + description: NonAdminBackupStorageLocationStatus defines the observed + state of NonAdminBackupStorageLocation + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3471f6a..627c403 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -30,6 +30,32 @@ rules: - get - patch - update +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations/finalizers + verbs: + - update +- apiGroups: + - oadp.openshift.io + resources: + - nonadminbackupstoragelocations/status + verbs: + - get + - patch + - update - apiGroups: - velero.io resources: From 408f7752c314f98aa74699650233067d86b043ce Mon Sep 17 00:00:00 2001 From: Michal Pryc Date: Thu, 14 Nov 2024 10:45:28 +0100 Subject: [PATCH 3/4] Rename some of the imports to be consistent from oadpv1alpha1 to nacv1alpha1 Signed-off-by: Michal Pryc --- cmd/main.go | 1 - .../nonadminbackupstoragelocation_controller.go | 4 ++-- .../nonadminbackupstoragelocation_controller_test.go | 8 ++++---- internal/controller/suite_test.go | 3 --- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index f2e8213..6dab8e0 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -53,7 +53,6 @@ func init() { utilruntime.Must(nacv1alpha1.AddToScheme(scheme)) 
utilruntime.Must(velerov1.AddToScheme(scheme)) - utilruntime.Must(oadpv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } diff --git a/internal/controller/nonadminbackupstoragelocation_controller.go b/internal/controller/nonadminbackupstoragelocation_controller.go index 154c0ef..9607464 100644 --- a/internal/controller/nonadminbackupstoragelocation_controller.go +++ b/internal/controller/nonadminbackupstoragelocation_controller.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - oadpv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" ) // NonAdminBackupStorageLocationReconciler reconciles a NonAdminBackupStorageLocation object @@ -57,6 +57,6 @@ func (r *NonAdminBackupStorageLocationReconciler) Reconcile(ctx context.Context, // SetupWithManager sets up the controller with the Manager. func (r *NonAdminBackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&oadpv1alpha1.NonAdminBackupStorageLocation{}). + For(&nacv1alpha1.NonAdminBackupStorageLocation{}). 
Complete(r) } diff --git a/internal/controller/nonadminbackupstoragelocation_controller_test.go b/internal/controller/nonadminbackupstoragelocation_controller_test.go index 3bcec3c..ff541d6 100644 --- a/internal/controller/nonadminbackupstoragelocation_controller_test.go +++ b/internal/controller/nonadminbackupstoragelocation_controller_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - oadpv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" ) var _ = Describe("NonAdminBackupStorageLocation Controller", func() { @@ -40,13 +40,13 @@ var _ = Describe("NonAdminBackupStorageLocation Controller", func() { Name: resourceName, Namespace: "default", // TODO(user):Modify as needed } - nonadminbackupstoragelocation := &oadpv1alpha1.NonAdminBackupStorageLocation{} + nonadminbackupstoragelocation := &nacv1alpha1.NonAdminBackupStorageLocation{} BeforeEach(func() { By("creating the custom resource for the Kind NonAdminBackupStorageLocation") err := k8sClient.Get(ctx, typeNamespacedName, nonadminbackupstoragelocation) if err != nil && errors.IsNotFound(err) { - resource := &oadpv1alpha1.NonAdminBackupStorageLocation{ + resource := &nacv1alpha1.NonAdminBackupStorageLocation{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName, Namespace: "default", @@ -59,7 +59,7 @@ var _ = Describe("NonAdminBackupStorageLocation Controller", func() { AfterEach(func() { // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &oadpv1alpha1.NonAdminBackupStorageLocation{} + resource := &nacv1alpha1.NonAdminBackupStorageLocation{} err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 20e154f..4f99475 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -79,9 +79,6 @@ var _ = ginkgo.BeforeSuite(func() { err = velerov1.AddToScheme(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = oadpv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) From 5464b9816330f7bffe0ee92dba2de354341a675c Mon Sep 17 00:00:00 2001 From: Michal Pryc Date: Mon, 18 Nov 2024 15:37:49 +0100 Subject: [PATCH 4/4] NonAdminBackup BSL - WIP Signed-off-by: Michal Pryc --- .../nonadminbackupstoragelocation_types.go | 63 +- api/v1alpha1/nonadmincontroller_types.go | 8 +- api/v1alpha1/zz_generated.deepcopy.go | 37 +- cmd/main.go | 8 +- ...ift.io_nonadminbackupstoragelocations.yaml | 220 +++++- config/rbac/role.yaml | 32 + .../design/non_admin_backupstoragelocation.md | 116 ++++ internal/common/constant/constant.go | 26 +- internal/common/function/function.go | 78 ++- internal/common/function/function_test.go | 4 +- .../controller/nonadminbackup_controller.go | 55 +- .../nonadminbackup_controller_test.go | 6 +- ...onadminbackupstoragelocation_controller.go | 646 +++++++++++++++++- ...inbackupstoragelocation_controller_test.go | 84 --- .../velerobackupstoragelocation_handler.go | 63 ++ .../predicate/composite_nabsl_predicate.go | 71 ++ ...nonadminbackupstoragelocation_predicate.go | 58 ++ .../velerobackupstoragelocation_predicate.go | 47 ++ 18 files changed, 1476 insertions(+), 146 deletions(-) create mode 100644 docs/design/non_admin_backupstoragelocation.md delete mode 100644 
internal/controller/nonadminbackupstoragelocation_controller_test.go create mode 100644 internal/handler/velerobackupstoragelocation_handler.go create mode 100644 internal/predicate/composite_nabsl_predicate.go create mode 100644 internal/predicate/nonadminbackupstoragelocation_predicate.go create mode 100644 internal/predicate/velerobackupstoragelocation_predicate.go diff --git a/api/v1alpha1/nonadminbackupstoragelocation_types.go b/api/v1alpha1/nonadminbackupstoragelocation_types.go index ddd33fa..f100edc 100644 --- a/api/v1alpha1/nonadminbackupstoragelocation_types.go +++ b/api/v1alpha1/nonadminbackupstoragelocation_types.go @@ -17,31 +17,75 @@ limitations under the License. package v1alpha1 import ( + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// NonAdminBackupStorageLocationPhase is a simple one high-level summary of the lifecycle of an NonAdminBackupStorageLocation. 
+// +kubebuilder:validation:Enum=New;Available;Unavailable;Created;Deleting +type NonAdminBackupStorageLocationPhase string + +// NonAdminBackupStorageLocationPhase constants similar to velerov1.BackupStorageLocationPhase +const ( + NaBSLPhaseNew NonAdminBackupStorageLocationPhase = "New" + NaBSLPhaseAvailable NonAdminBackupStorageLocationPhase = "Available" + NaBSLPhaseUnavailable NonAdminBackupStorageLocationPhase = "Unavailable" + NaBSLPhaseCreated NonAdminBackupStorageLocationPhase = "Created" + NaBSLPhaseDeleting NonAdminBackupStorageLocationPhase = "Deleting" +) + +// NonAdminBSLCondition contains additional conditions to the +// generic ones defined as NonAdminCondition +// +kubebuilder:validation:Enum=SecretSynced;BackupStorageLocationSynced +type NonAdminBSLCondition string + +// Predefined NonAdminBSLConditions +const ( + NonAdminBSLConditionSecretSynced NonAdminBSLCondition = "SecretSynced" + NonAdminBSLConditionBSLSynced NonAdminBSLCondition = "BackupStorageLocationSynced" +) + // NonAdminBackupStorageLocationSpec defines the desired state of NonAdminBackupStorageLocation type NonAdminBackupStorageLocationSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Embeds the Velero BackupStorageLocationSpec to inherit all fields + velerov1.BackupStorageLocationSpec `json:",inline"` +} + +// VeleroBackupStorageLocation contains information of the related Velero backup storage location object. +type VeleroBackupStorageLocation struct { + // status captures the current status of the Velero backup storage location. + // +optional + Status *velerov1.BackupStorageLocationStatus `json:"status,omitempty"` + + // nacuuid references the Velero BackupStorageLocation object by its label containing the same NACUUID. + // +optional + NACUUID string `json:"nacuuid,omitempty"` + + // references the Velero BackupStorageLocation object by its name. 
+ // +optional + Name string `json:"name,omitempty"` - // Foo is an example field of NonAdminBackupStorageLocation. Edit nonadminbackupstoragelocation_types.go to remove/update - Foo string `json:"foo,omitempty"` + // namespace references the Namespace in which Velero backup storage location exists. + // +optional + Namespace string `json:"namespace,omitempty"` } // NonAdminBackupStorageLocationStatus defines the observed state of NonAdminBackupStorageLocation type NonAdminBackupStorageLocationStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file -} + // +optional + VeleroBackupStorageLocation *VeleroBackupStorageLocation `json:"veleroBackupStorageLocation,omitempty"` -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status + Phase NonAdminBackupStorageLocationPhase `json:"phase,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` +} // NonAdminBackupStorageLocation is the Schema for the nonadminbackupstoragelocations API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status type NonAdminBackupStorageLocation struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -50,9 +94,8 @@ type NonAdminBackupStorageLocation struct { Status NonAdminBackupStorageLocationStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true - // NonAdminBackupStorageLocationList contains a list of NonAdminBackupStorageLocation +// +kubebuilder:object:root=true type NonAdminBackupStorageLocationList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/api/v1alpha1/nonadmincontroller_types.go b/api/v1alpha1/nonadmincontroller_types.go index 0c49b64..d12f876 100644 --- a/api/v1alpha1/nonadmincontroller_types.go +++ b/api/v1alpha1/nonadmincontroller_types.go @@ -20,10 +20,10 @@ package v1alpha1 // +kubebuilder:validation:Enum=Accepted;Queued;Deleting type 
NonAdminCondition string -// Predefined conditions for NonAdminBackup. -// One NonAdminBackup object may have multiple conditions. -// It is more granular knowledge of the NonAdminBackup object and represents the -// array of the conditions through which the NonAdminBackup has or has not passed +// Predefined conditions for NonAdminController objects. +// One NonAdminController object may have multiple conditions. +// It is more granular knowledge of the NonAdminController object and represents the +// array of the conditions through which the NonAdminController has or has not passed const ( NonAdminConditionAccepted NonAdminCondition = "Accepted" NonAdminConditionQueued NonAdminCondition = "Queued" diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 62bbed8..5b222d1 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -142,8 +142,8 @@ func (in *NonAdminBackupStorageLocation) DeepCopyInto(out *NonAdminBackupStorage *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocation. @@ -199,6 +199,7 @@ func (in *NonAdminBackupStorageLocationList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NonAdminBackupStorageLocationSpec) DeepCopyInto(out *NonAdminBackupStorageLocationSpec) { *out = *in + in.BackupStorageLocationSpec.DeepCopyInto(&out.BackupStorageLocationSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocationSpec. 
@@ -214,6 +215,18 @@ func (in *NonAdminBackupStorageLocationSpec) DeepCopy() *NonAdminBackupStorageLo // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NonAdminBackupStorageLocationStatus) DeepCopyInto(out *NonAdminBackupStorageLocationStatus) { *out = *in + if in.VeleroBackupStorageLocation != nil { + in, out := &in.VeleroBackupStorageLocation, &out.VeleroBackupStorageLocation + *out = new(VeleroBackupStorageLocation) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminBackupStorageLocationStatus. @@ -246,6 +259,26 @@ func (in *VeleroBackup) DeepCopy() *VeleroBackup { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VeleroBackupStorageLocation) DeepCopyInto(out *VeleroBackupStorageLocation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(v1.BackupStorageLocationStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroBackupStorageLocation. +func (in *VeleroBackupStorageLocation) DeepCopy() *VeleroBackupStorageLocation { + if in == nil { + return nil + } + out := new(VeleroBackupStorageLocation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VeleroDeleteBackupRequest) DeepCopyInto(out *VeleroDeleteBackupRequest) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 6dab8e0..c31c687 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -27,9 +27,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -142,8 +139,9 @@ func main() { os.Exit(1) } if err = (&controller.NonAdminBackupStorageLocationReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + OADPNamespace: oadpNamespace, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NonAdminBackupStorageLocation") os.Exit(1) diff --git a/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml b/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml index 51181b2..5be934a 100644 --- a/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml +++ b/config/crd/bases/oadp.openshift.io_nonadminbackupstoragelocations.yaml @@ -41,14 +41,228 @@ spec: description: NonAdminBackupStorageLocationSpec defines the desired state of NonAdminBackupStorageLocation properties: - foo: - description: Foo is an example field of NonAdminBackupStorageLocation. - Edit nonadminbackupstoragelocation_types.go to remove/update + accessMode: + description: AccessMode defines the permissions for the backup storage + location. + enum: + - ReadOnly + - ReadWrite type: string + backupSyncPeriod: + description: BackupSyncPeriod defines how frequently to sync backup + API objects from object storage. A value of 0 disables sync. 
+ nullable: true + type: string + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + credential: + description: Credential contains the credential information intended + to be used with this location + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + description: Default indicates this location is the default backup + storage location. + type: boolean + objectStorage: + description: ObjectStorageLocation specifies the settings necessary + to connect to a provider's object storage. + properties: + bucket: + description: Bucket is the bucket to use for object storage. + type: string + caCert: + description: CACert defines a CA bundle to use when verifying + TLS connections to the provider. + format: byte + type: string + prefix: + description: Prefix is the path inside a bucket to use for Velero + storage. Optional. + type: string + required: + - bucket + type: object + provider: + description: Provider is the provider of the backup storage. + type: string + validationFrequency: + description: ValidationFrequency defines how frequently to validate + the corresponding object storage. A value of 0 disables validation. 
+ nullable: true + type: string + required: + - objectStorage + - provider type: object status: description: NonAdminBackupStorageLocationStatus defines the observed state of NonAdminBackupStorageLocation + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + description: NonAdminBackupStorageLocationPhase is a simple one high-level + summary of the lifecycle of an NonAdminBackupStorageLocation. + enum: + - New + - Available + - Unavailable + - Created + - Deleting + type: string + veleroBackupStorageLocation: + description: 'Important: Run "make" to regenerate code after modifying + this file' + properties: + nacuuid: + description: nacuuid references the Velero BackupStorageLocation + object by it's label containing same NACUUID. + type: string + name: + description: references the Velero BackupStorageLocation object + by it's name. + type: string + namespace: + description: namespace references the Namespace in which Velero + backup storage location exists. + type: string + status: + description: status captures the current status of the Velero + backup storage location. 
+ properties: + accessMode: + description: |- + AccessMode is an unused field. + + + Deprecated: there is now an AccessMode field on the Spec and this field + will be removed entirely as of v2.0. + enum: + - ReadOnly + - ReadWrite + type: string + lastSyncedRevision: + description: |- + LastSyncedRevision is the value of the `metadata/revision` file in the backup + storage location the last time the BSL's contents were synced into the cluster. + + + Deprecated: this field is no longer updated or used for detecting changes to + the location's contents and will be removed entirely in v2.0. + type: string + lastSyncedTime: + description: |- + LastSyncedTime is the last time the contents of the location were synced into + the cluster. + format: date-time + nullable: true + type: string + lastValidationTime: + description: |- + LastValidationTime is the last time the backup store location was validated + the cluster. + format: date-time + nullable: true + type: string + message: + description: Message is a message about the backup storage + location's status. + type: string + phase: + description: Phase is the current state of the BackupStorageLocation. 
+ enum: + - Available + - Unavailable + type: string + type: object + type: object type: object type: object served: true diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 627c403..8b97d6d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,6 +4,18 @@ kind: ClusterRole metadata: name: non-admin-controller-role rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - oadp.openshift.io resources: @@ -68,6 +80,26 @@ rules: - patch - update - watch +- apiGroups: + - velero.io + resources: + - backupstoragelocations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - velero.io + resources: + - backupstoragelocations/status + verbs: + - get + - patch + - update - apiGroups: - velero.io resources: diff --git a/docs/design/non_admin_backupstoragelocation.md b/docs/design/non_admin_backupstoragelocation.md new file mode 100644 index 0000000..a1666b8 --- /dev/null +++ b/docs/design/non_admin_backupstoragelocation.md @@ -0,0 +1,116 @@ +# NonAdminBackupStorageLocation Controller Design + +## Overview +The `NonAdminBackupStorageLocation` controller is responsible for managing backup storage locations requested by non-admin users in a multi-tenant Kubernetes environment. It ensures that users can only access and manage backup storage locations within their authorized namespaces while maintaining security boundaries. 
+ +## Architecture + +```mermaid +%%{init: {'theme':'neutral'}}%% +flowchart TD + title[Non-Admin BSL Controller Workflow] + style title font-size:24px,font-weight:bold,fill:#e6f3ff,stroke:#666,stroke-width:2px,stroke-dasharray: 0 + + %% Start + START[**Start NaBSL Reconciliation**] --> OPERATION[**Determine Operation Type**] + + %% Create/Update Flow + OPERATION -->|**Create/Update**| VALIDATE_CONFIG{Validate Non-Admin BSL Config} + VALIDATE_CONFIG -->|Invalid| INVALID_CONFIG[Set Phase: Invalid] + VALIDATE_CONFIG -->|Valid| GENERATE_UUID[Generate NaBSL UUID and Store in Status] + + GENERATE_UUID --> CREATE_OR_UPDATE_SECRET[Create/Update Secret in OADP Namespace] + CREATE_OR_UPDATE_SECRET --> CREATE_OR_UPDATE_BSL[Create/Update Velero BSL Resource in OADP Namespace] + CREATE_OR_UPDATE_BSL --> UPDATE_STATUS[Update NaBSL Status with Velero BSL Info] + + %% Delete Flow + OPERATION -->|**Delete**| CHECK_SECRET_EXISTS{Check if Secret Exists} + CHECK_SECRET_EXISTS -->|Yes| DELETE_SECRET[Delete Secret in OADP Namespace] + CHECK_SECRET_EXISTS -->|No| CHECK_BSL_EXISTS{Check if Velero BSL Exists} + + DELETE_SECRET --> CHECK_BSL_EXISTS + CHECK_BSL_EXISTS -->|Yes| DELETE_BSL[Delete Velero BSL Resource in OADP Namespace] + CHECK_BSL_EXISTS -->|No| REMOVE_FINALIZER[Remove Finalizer from NaBSL Resource] + + DELETE_BSL --> REMOVE_FINALIZER + + %% Endpoints + INVALID_CONFIG --> END[End Reconciliation] + UPDATE_STATUS --> END + REMOVE_FINALIZER --> END + + %% Subgraphs + subgraph "Validation" + VALIDATE_CONFIG + end + + subgraph "Create/Update Operations" + GENERATE_UUID + CREATE_OR_UPDATE_SECRET + CREATE_OR_UPDATE_BSL + UPDATE_STATUS + end + + subgraph "Delete Operations" + CHECK_SECRET_EXISTS + DELETE_SECRET + CHECK_BSL_EXISTS + DELETE_BSL + REMOVE_FINALIZER + end + + %% Styling + classDef phase fill:#ffcc99,stroke:#333,stroke-width:2px + classDef process fill:#b3d9ff,stroke:#333,stroke-width:2px + classDef decision fill:#ffeb99,stroke:#333,stroke-width:2px + classDef endpoint 
+2. Controller verifies the Non-Admin BSL configuration, including the existence of the secret in the user's namespace.
+2. Controller validates the changes.
+ NabOriginNameAnnotation = "openshift.io/oadp-nab-origin-name" + NabOriginNamespaceAnnotation = "openshift.io/oadp-nab-origin-namespace" + NabslOriginNameAnnotation = "openshift.io/oadp-nabsl-origin-name" + NabslOriginNamespaceAnnotation = "openshift.io/oadp-nabsl-origin-namespace" + NabOriginNACUUIDLabel = "openshift.io/oadp-nab-origin-nacuuid" + NarOriginNACUUIDLabel = "openshift.io/oadp-nar-origin-nacuuid" + NabslOriginNACUUIDLabel = "openshift.io/oadp-nabsl-origin-nacuuid" + NabFinalizerName = "nonadminbackup.oadp.openshift.io/finalizer" + NabslFinalizerName = "nabsl.oadp.openshift.io/finalizer" + UUIDString = "UUID" + CurrentPhaseString = "currentPhase" + NamespaceString = "namespace" + NameString = "name" ) // Common environment variables for the Non Admin Controller diff --git a/internal/common/function/function.go b/internal/common/function/function.go index 4b0176c..40bc03b 100644 --- a/internal/common/function/function.go +++ b/internal/common/function/function.go @@ -24,6 +24,7 @@ import ( "github.com/go-logr/logr" "github.com/google/uuid" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -51,6 +52,14 @@ func GetNonAdminBackupAnnotations(objectMeta metav1.ObjectMeta) map[string]strin } } +// GetNonAdminBackupStorageLocationAnnotations return the required Non Admin annotations +func GetNonAdminBackupStorageLocationAnnotations(objectMeta metav1.ObjectMeta) map[string]string { + return map[string]string{ + constant.NabslOriginNamespaceAnnotation: objectMeta.Namespace, + constant.NabslOriginNameAnnotation: objectMeta.Name, + } +} + // containsOnlyNamespace checks if the given namespaces slice contains only the specified namespace func containsOnlyNamespace(namespaces []string, namespace string) bool { for _, ns := range namespaces { @@ -124,7 +133,7 @@ func GenerateNacObjectUUID(namespace, nacName string) 
+	switch len(secretList.Items) {
+	case 0:
+		return nil, nil // No matching Secret found
+	case 1:
+		return &secretList.Items[0], nil // Found 1 matching Secret
+	default:
+		return nil, fmt.Errorf("multiple Secret objects found with label %s=%s in namespace '%s'", constant.NabslOriginNACUUIDLabel, labelValue, namespace)
+	}
+	switch len(bslList.Items) {
+	case 0:
+		return nil, nil // No matching VeleroBackupStorageLocation found
+	case 1:
+		return &bslList.Items[0], nil // Found 1 matching VeleroBackupStorageLocation
+	default:
+		return nil, fmt.Errorf("multiple VeleroBackupStorageLocation objects found with label %s=%s in namespace '%s'", constant.NabslOriginNACUUIDLabel, labelValue, namespace)
+	}
!checkLabelAnnotationValueIsValid(annotations, constant.NabslOriginNamespaceAnnotation) { + return false + } + if !checkLabelAnnotationValueIsValid(annotations, constant.NabslOriginNameAnnotation) { + return false + } + + return true +} + func checkLabelValue(objLabels map[string]string, key string, value string) bool { got, exists := objLabels[key] if !exists { diff --git a/internal/common/function/function_test.go b/internal/common/function/function_test.go index 26fb977..c62a2cb 100644 --- a/internal/common/function/function_test.go +++ b/internal/common/function/function_test.go @@ -272,7 +272,7 @@ func TestGetVeleroBackupByLabel(t *testing.T) { labelValue: testAppStr, mockBackups: []velerov1.Backup{}, expected: nil, - expectedError: errors.New("invalid input: namespace, labelKey, and labelValue must not be empty"), + expectedError: errors.New("invalid input: namespace=\"\", labelKey=\"openshift.io/oadp-nab-origin-nacuuid\", labelValue=\"test-app\""), }, } @@ -523,7 +523,7 @@ func TestGetVeleroDeleteBackupRequestByLabel(t *testing.T) { labelValue: testAppStr, mockRequests: []velerov1.DeleteBackupRequest{}, expected: nil, - expectedError: errors.New("invalid input: namespace, labelKey, and labelValue must not be empty"), + expectedError: errors.New("invalid input: namespace=\"\", labelKey=\"velero.io/backup-name\", labelValue=\"test-app\""), }, } diff --git a/internal/controller/nonadminbackup_controller.go b/internal/controller/nonadminbackup_controller.go index 1ffbdeb..4fb6c1f 100644 --- a/internal/controller/nonadminbackup_controller.go +++ b/internal/controller/nonadminbackup_controller.go @@ -60,7 +60,6 @@ const ( statusUpdateError = "Failed to update NonAdminBackup Status" findSingleVBError = "Error encountered while retrieving VeleroBackup for NAB during the Delete operation" findSingleVDBRError = "Error encountered while retrieving DeleteBackupRequest for NAB during the Delete operation" - uuidString = "UUID" nameString = "name" ) @@ -71,6 +70,10 @@ 
const ( // +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state, // defined in NonAdminBackup object Spec. @@ -261,7 +264,7 @@ func (r *NonAdminBackupReconciler) createVeleroDeleteBackupRequest(ctx context.C if err != nil { // Log error if multiple VeleroBackup objects are found - logger.Error(err, findSingleVBError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVBError, constant.UUIDString, veleroBackupNACUUID) return false, err } @@ -273,7 +276,7 @@ func (r *NonAdminBackupReconciler) createVeleroDeleteBackupRequest(ctx context.C deleteBackupRequest, err := function.GetVeleroDeleteBackupRequestByLabel(ctx, r.Client, r.OADPNamespace, veleroBackupNACUUID) if err != nil { // Log error if multiple DeleteBackupRequest objects are found - logger.Error(err, findSingleVDBRError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVDBRError, constant.UUIDString, veleroBackupNACUUID) return false, err } @@ -345,7 +348,7 @@ func (r *NonAdminBackupReconciler) deleteVeleroBackupAndDeleteBackupRequestObjec if err != nil { // Case where more than one VeleroBackup is found with the same label UUID // TODO (migi): Determine if all objects with this UUID should be deleted - logger.Error(err, findSingleVBError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVBError, constant.UUIDString, veleroBackupNACUUID) return 
false, err } @@ -362,7 +365,7 @@ func (r *NonAdminBackupReconciler) deleteVeleroBackupAndDeleteBackupRequestObjec deleteBackupRequest, err := function.GetVeleroDeleteBackupRequestByLabel(ctx, r.Client, r.OADPNamespace, veleroBackupNACUUID) if err != nil { // Log error if multiple DeleteBackupRequest objects are found - logger.Error(err, findSingleVDBRError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVDBRError, constant.UUIDString, veleroBackupNACUUID) return false, err } if deleteBackupRequest != nil { @@ -407,7 +410,7 @@ func (r *NonAdminBackupReconciler) removeNabFinalizerUponVeleroBackupDeletion(ct if err != nil { // Case in which more then one VeleroBackup is found with the same label UUID // TODO (migi): Should we delete all of the objects with such UUID ? - logger.Error(err, findSingleVBError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVBError, constant.UUIDString, veleroBackupNACUUID) return false, err } @@ -595,12 +598,12 @@ func (r *NonAdminBackupReconciler) createVeleroBackupAndSyncWithNonAdminBackup(c if err != nil { // Case in which more then one VeleroBackup is found with the same label UUID - logger.Error(err, findSingleVBError, uuidString, veleroBackupNACUUID) + logger.Error(err, findSingleVBError, constant.UUIDString, veleroBackupNACUUID) return false, err } if veleroBackup == nil { - logger.Info("VeleroBackup with label not found, creating one", uuidString, veleroBackupNACUUID) + logger.Info("VeleroBackup with label not found, creating one", constant.UUIDString, veleroBackupNACUUID) backupSpec := nab.Spec.BackupSpec.DeepCopy() backupSpec.IncludedNamespaces = []string{nab.Namespace} @@ -712,11 +715,21 @@ func updateNonAdminBackupVeleroBackupStatus(status *nacv1alpha1.NonAdminBackupSt if status.VeleroBackup == nil { status.VeleroBackup = &nacv1alpha1.VeleroBackup{} } - if status.VeleroBackup.Status == nil || !reflect.DeepEqual(status.VeleroBackup.Status, veleroBackup.Status) { - status.VeleroBackup.Status = 
veleroBackup.Status.DeepCopy() - return true + + // Treat nil as equivalent to a zero-value struct + currentStatus := velerov1.BackupStatus{} + if status.VeleroBackup.Status != nil { + currentStatus = *status.VeleroBackup.Status } - return false + + // Return false if both statuses are equivalent + if reflect.DeepEqual(currentStatus, veleroBackup.Status) { + return false + } + + // Update and return true if they differ + status.VeleroBackup.Status = veleroBackup.Status.DeepCopy() + return true } // updateNonAdminBackupDeleteBackupRequestStatus sets the VeleroDeleteBackupRequest status field in NonAdminBackup object status and returns true @@ -728,9 +741,19 @@ func updateNonAdminBackupDeleteBackupRequestStatus(status *nacv1alpha1.NonAdminB if status.VeleroDeleteBackupRequest == nil { status.VeleroDeleteBackupRequest = &nacv1alpha1.VeleroDeleteBackupRequest{} } - if status.VeleroDeleteBackupRequest.Status == nil || !reflect.DeepEqual(status.VeleroDeleteBackupRequest.Status, veleroDeleteBackupRequest.Status) { - status.VeleroDeleteBackupRequest.Status = veleroDeleteBackupRequest.Status.DeepCopy() - return true + + // Treat nil as equivalent to a zero-value struct + currentStatus := velerov1.DeleteBackupRequestStatus{} + if status.VeleroDeleteBackupRequest.Status != nil { + currentStatus = *status.VeleroDeleteBackupRequest.Status + } + + // Return false if both statuses are equivalent + if reflect.DeepEqual(currentStatus, veleroDeleteBackupRequest.Status) { + return false } - return false + + // Update and return true if they differ + status.VeleroDeleteBackupRequest.Status = veleroDeleteBackupRequest.Status.DeepCopy() + return true } diff --git a/internal/controller/nonadminbackup_controller_test.go b/internal/controller/nonadminbackup_controller_test.go index feb7f96..e8dea3d 100644 --- a/internal/controller/nonadminbackup_controller_test.go +++ b/internal/controller/nonadminbackup_controller_test.go @@ -696,10 +696,8 @@ var _ = ginkgo.Describe("Test single 
reconciles of NonAdminBackup Reconcile func }, }, nonAdminBackupExpectedStatus: nacv1alpha1.NonAdminBackupStatus{ - Phase: nacv1alpha1.NonAdminBackupPhaseCreated, - VeleroBackup: &nacv1alpha1.VeleroBackup{ - Status: &velerov1.BackupStatus{}, - }, + Phase: nacv1alpha1.NonAdminBackupPhaseCreated, + VeleroBackup: &nacv1alpha1.VeleroBackup{}, Conditions: []metav1.Condition{ { Type: "Accepted", diff --git a/internal/controller/nonadminbackupstoragelocation_controller.go b/internal/controller/nonadminbackupstoragelocation_controller.go index 9607464..bcdfda1 100644 --- a/internal/controller/nonadminbackupstoragelocation_controller.go +++ b/internal/controller/nonadminbackupstoragelocation_controller.go @@ -18,24 +18,50 @@ package controller import ( "context" + "fmt" + "reflect" + "github.com/go-logr/logr" + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/builder" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + "github.com/migtools/oadp-non-admin/internal/common/constant" + "github.com/migtools/oadp-non-admin/internal/common/function" + "github.com/migtools/oadp-non-admin/internal/handler" + "github.com/migtools/oadp-non-admin/internal/predicate" +) + +const ( + veleroBSLReferenceUpdated = "NonAdminBackupStorageLocation - Status Updated with UUID reference" + statusBslUpdateError = "Failed to update NonAdminBackupStorageLocation Status" + findSingleVBSLSecretError = "Error encountered while retrieving Velero BSL Secret for NABSL" + failedUpdateStatusError = "Failed to update status" + 
failedUpdateConditionError = "Failed to update status condition" ) // NonAdminBackupStorageLocationReconciler reconciles a NonAdminBackupStorageLocation object type NonAdminBackupStorageLocationReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + OADPNamespace string } -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/finalizers,verbs=update +type naBSLReconcileStepFunction func(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) + +// +kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=oadp.openshift.io,resources=nonadminbackupstoragelocations/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -47,16 +73,624 @@ type NonAdminBackupStorageLocationReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.3/pkg/reconcile func (r *NonAdminBackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + logger := log.FromContext(ctx) + logger.V(1).Info("NonAdminBackup Reconcile start") + + // Get the NonAdminBackupStorageLocation object + nabsl := &nacv1alpha1.NonAdminBackupStorageLocation{} + err := r.Get(ctx, req.NamespacedName, nabsl) + if err != nil { + if apierrors.IsNotFound(err) { + logger.V(1).Info(err.Error()) + return ctrl.Result{}, nil + } + logger.Error(err, "Unable to fetch NonAdminBackupStorageLocation") + return ctrl.Result{}, err + } + + // Determine which path to take + var reconcileSteps []naBSLReconcileStepFunction + + // First switch statement takes precedence over the next one + switch { + case !nabsl.ObjectMeta.DeletionTimestamp.IsZero(): + logger.V(1).Info("Executing direct deletion path") + reconcileSteps = []naBSLReconcileStepFunction{ + r.initNaBSLDelete, + r.deleteVeleroBSLSecret, + r.deleteVeleroBSL, + r.removeNaBSLFinalizerUponVeleroBSLDeletion, + } + default: + // Standard creation/update path + logger.V(1).Info("Executing nabsl creation/update path") + reconcileSteps = []naBSLReconcileStepFunction{ + r.initNaBSLCreate, + r.validateNaBSLSpec, + r.setVeleroBSLUUIDInNaBSLStatus, + r.setFinalizerOnNaBSL, + r.createSyncNaBSLSecrets, + r.createVeleroBSL, + r.syncVeleroBSLWithNaBSL, + } + } - // TODO(user): your logic here + // Execute the selected reconciliation steps + for _, step := range reconcileSteps { + requeue, err := step(ctx, logger, nabsl) + if err != nil { + return ctrl.Result{}, err + } else if requeue { + return ctrl.Result{Requeue: true}, nil + } + } + logger.V(1).Info("NonAdminBackup Reconcile exit") return ctrl.Result{}, nil } // SetupWithManager sets up the controller 
with the Manager. +// Note: Adding Secret Watch within the namespace is being considered. +// Challenges with Secret Watch: +// - Secret updates without NaBSL object updates would be missed +// - One secret can be used by multiple NaBSL objects +// - Would need to add VeleroBackupStorageLocation UUID labels/annotations +// to ensure correct Secret-to-NaBSL mapping or get all the NaBSL objects and check +// if that particular secret is being used by any of them. func (r *NonAdminBackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&nacv1alpha1.NonAdminBackupStorageLocation{}). + WithEventFilter( + predicate.CompositeNaBSLPredicate{ + NonAdminBackupStorageLocationPredicate: predicate.NonAdminBackupStorageLocationPredicate{}, + VeleroBackupStorageLocationPredicate: predicate.VeleroBackupStorageLocationPredicate{ + OADPNamespace: r.OADPNamespace, + }, + }). + Watches(&velerov1.BackupStorageLocation{}, &handler.VeleroBackupStorageLocationHandler{}). 
Complete(r) } + +// initNaBSLDelete initializes deletion of the NonAdminBackupStorageLocation object +func (r *NonAdminBackupStorageLocationReconciler) initNaBSLDelete(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + logger.V(1).Info("NonAdminBackupStorageLocation deletion initialized") + + // Set phase to Deleting + if updated := updateNaBSLPhase(&nabsl.Status.Phase, nacv1alpha1.NaBSLPhaseDeleting); updated { + if err := r.Status().Update(ctx, nabsl); err != nil { + logger.Error(err, statusBslUpdateError) + return false, err + } + } + return false, nil +} + +// deleteVeleroBSLSecret deletes the Secret associated with the VeleroBackupStorageLocation object that was created by the controller +func (r *NonAdminBackupStorageLocationReconciler) deleteVeleroBSLSecret(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + veleroObjectsNACUUID := nabsl.Status.VeleroBackupStorageLocation.NACUUID + + veleroBslSecret, err := function.GetBslSecretByLabel(ctx, r.Client, r.OADPNamespace, veleroObjectsNACUUID) + if err != nil { + logger.Error(err, findSingleVBSLSecretError) + return false, err + } + + if veleroBslSecret == nil { + logger.V(1).Info("Velero BackupStorageLocation Secret not found") + return false, nil + } + + if err := r.Delete(ctx, veleroBslSecret); err != nil { + logger.Error(err, "Failed to delete Velero BackupStorageLocation Secret") + return false, err + } + + logger.V(1).Info("Velero BackupStorageLocation Secret deleted") + + return false, nil +} + +// deleteVeleroBSL deletes the associated VeleroBackupStorageLocation object +func (r *NonAdminBackupStorageLocationReconciler) deleteVeleroBSL(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + veleroObjectsNACUUID := nabsl.Status.VeleroBackupStorageLocation.NACUUID + + veleroBsl, err := function.GetVeleroBackupStorageLocationByLabel(ctx, 
r.Client, r.OADPNamespace, veleroObjectsNACUUID) + + if veleroBsl == nil { + logger.V(1).Info("Velero BackupStorageLocation not found") + return false, nil + } + + if err != nil { + logger.Error(err, "Failed to get Velero BackupStorageLocation") + return false, err + } + + if err := r.Delete(ctx, veleroBsl); err != nil { + logger.Error(err, "Failed to delete Velero BackupStorageLocation") + return false, err + } + + logger.V(1).Info("Velero BackupStorageLocation deleted") + + return false, nil +} + +// removeNaBSLFinalizerUponVeleroBSLDeletion removes the finalizer from NonAdminBackupStorageLocation +// after confirming the VeleroBackupStorageLocation is deleted +func (r *NonAdminBackupStorageLocationReconciler) removeNaBSLFinalizerUponVeleroBSLDeletion(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + if !controllerutil.ContainsFinalizer(nabsl, constant.NabslFinalizerName) { + logger.V(1).Info("NonAdminBackupStorageLocation finalizer not found") + return false, nil + } + + controllerutil.RemoveFinalizer(nabsl, constant.NabslFinalizerName) + if err := r.Update(ctx, nabsl); err != nil { + logger.Error(err, "Failed to remove finalizer") + return false, err + } + + logger.V(1).Info("NonAdminBackupStorageLocation finalizer removed") + + return false, nil +} + +// initNaBSLCreate initializes creation of the NonAdminBackupStorageLocation object +func (r *NonAdminBackupStorageLocationReconciler) initNaBSLCreate(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + if nabsl.Status.Phase != constant.EmptyString { + logger.V(1).Info("NonAdminBackupStorageLocation Phase already initialized", constant.CurrentPhaseString, nabsl.Status.Phase) + return false, nil + } + + // Set phase to New + if updated := updateNaBSLPhase(&nabsl.Status.Phase, nacv1alpha1.NaBSLPhaseNew); updated { + if err := r.Status().Update(ctx, nabsl); err != nil { + logger.Error(err, 
statusBslUpdateError) + return false, err + } + logger.V(1).Info("NonAdminBackupStorageLocation Phase set to New") + } else { + logger.V(1).Info("NonAdminBackupStorageLocation Phase update skipped", constant.CurrentPhaseString, nabsl.Status.Phase) + } + return false, nil +} + +// validateNaBSLSpec validates the NonAdminBackupStorageLocation spec +func (r *NonAdminBackupStorageLocationReconciler) validateNaBSLSpec(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + // Skip validation if not in New phase + if nabsl.Status.Phase != nacv1alpha1.NaBSLPhaseNew { + logger.V(1).Info("Skipping validation, not in New phase", constant.CurrentPhaseString, nabsl.Status.Phase) + return false, nil + } + + // Check if credentials secret is specified + if nabsl.Spec.Credential == nil || nabsl.Spec.Credential.Name == "" { + err := fmt.Errorf("credentials secret name is required") + logger.Error(err, "Validation failed") + + // Update status condition + meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionAccepted), + Status: metav1.ConditionFalse, + Reason: "ValidationFailed", + Message: err.Error(), + }) + + if err := r.Status().Update(ctx, nabsl); err != nil { + logger.Error(err, failedUpdateStatusError) + return false, err + } + return false, nil + } + + // Check if the secret exists in the same namespace + secret := &corev1.Secret{} + if err := r.Get(ctx, types.NamespacedName{ + Namespace: nabsl.Namespace, + Name: nabsl.Spec.Credential.Name, + }, secret); err != nil { + if apierrors.IsNotFound(err) { + logger.Error(err, "Credentials secret not found", "secretName", nabsl.Spec.Credential.Name) + + // Update status condition + meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionAccepted), + Status: metav1.ConditionFalse, + Reason: "SecretNotFound", + Message: fmt.Sprintf("Credentials secret %s not found in 
namespace %s", nabsl.Spec.Credential.Name, nabsl.Namespace), + }) + + if errStatus := r.Status().Update(ctx, nabsl); errStatus != nil { + logger.Error(errStatus, failedUpdateStatusError) + // We don't return the error here because we are more interested in the + // secret not found error + } + } + logger.Error(err, "Failed to get credentials secret") + return false, err + } + + // TODO: Add validation for the secret data and other BSL spec fields + + // Validation successful, update phase and condition + updatedPhase := updateNaBSLPhase(&nabsl.Status.Phase, nacv1alpha1.NaBSLPhaseNew) + + updatedCondition := meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminConditionAccepted), + Status: metav1.ConditionTrue, + Reason: "ValidationSucceeded", + Message: "NonAdminBackupStorageLocation spec validation successful", + }) + + if updatedPhase || updatedCondition { + if updateErr := r.Status().Update(ctx, nabsl); updateErr != nil { + logger.Error(updateErr, failedUpdateStatusError) + return false, updateErr + } + logger.V(1).Info("NonAdminBackupStorageLocation Phase set to Accepted") + logger.V(1).Info("NonAdminBackupStorageLocation Condition set to Validated") + } + + return false, nil +} + +// setVeleroBSLUUIDInNaBSLStatus sets the UUID for the VeleroBackupStorageLocation in the NonAdminBackupStorageLocation status +func (r *NonAdminBackupStorageLocationReconciler) setVeleroBSLUUIDInNaBSLStatus(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + // Get the latest version of the NaBSL object just before checking if the NACUUID is set + // to ensure we do not miss any updates to the NaBSL object + nabslOriginal := nabsl.DeepCopy() + if err := r.Get(ctx, types.NamespacedName{Name: nabslOriginal.Name, Namespace: nabslOriginal.Namespace}, nabsl); err != nil { + logger.Error(err, "Failed to re-fetch NonAdminBackupStorageLocation") + return false, err + } + + if
nabsl.Status.VeleroBackupStorageLocation == nil || nabsl.Status.VeleroBackupStorageLocation.NACUUID == constant.EmptyString { + veleroBslNACUUID := function.GenerateNacObjectUUID(nabsl.Namespace, nabsl.Name) + nabsl.Status.VeleroBackupStorageLocation = &nacv1alpha1.VeleroBackupStorageLocation{ + NACUUID: veleroBslNACUUID, + Namespace: r.OADPNamespace, + Name: veleroBslNACUUID, + } + if err := r.Status().Update(ctx, nabsl); err != nil { + logger.Error(err, statusUpdateError) + return false, err + } + logger.V(1).Info(veleroBSLReferenceUpdated) + } else { + logger.V(1).Info("NonAdminBackupStorageLocation already contains VeleroBackupStorageLocation UUID reference") + } + return false, nil +} + +// setFinalizerOnNaBSL sets the finalizer on the NonAdminBackupStorageLocation object +func (r *NonAdminBackupStorageLocationReconciler) setFinalizerOnNaBSL(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + // If the object does not have the finalizer, add it before creating Velero BackupStorageLocation and relevant secret + // to ensure we won't risk having orphaned resources.
+ if !controllerutil.ContainsFinalizer(nabsl, constant.NabslFinalizerName) { + controllerutil.AddFinalizer(nabsl, constant.NabslFinalizerName) + if err := r.Update(ctx, nabsl); err != nil { + logger.Error(err, "Failed to add finalizer") + return false, err + } + logger.V(1).Info("Finalizer added to NonAdminBackupStorageLocation", "finalizer", constant.NabslFinalizerName) + } else { + logger.V(1).Info("Finalizer exists on the NonAdminBackupStorageLocation object", "finalizer", constant.NabslFinalizerName) + } + return false, nil +} + +// createSyncNaBSLSecrets creates the VeleroBackupStorageLocation secret in the OADP namespace +func (r *NonAdminBackupStorageLocationReconciler) createSyncNaBSLSecrets(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + // Skip syncing if the VeleroBackupStorageLocation UUID is not set or the source secret is not set in the spec + if nabsl.Status.VeleroBackupStorageLocation == nil || + nabsl.Status.VeleroBackupStorageLocation.NACUUID == constant.EmptyString || + nabsl.Spec.Credential == nil || + nabsl.Spec.Credential.Name == constant.EmptyString { + return false, nil + } + + // Get the source secret from the NonAdminBackupStorageLocation namespace + sourceNaBSLSecret := &corev1.Secret{} + if err := r.Get(ctx, types.NamespacedName{ + Namespace: nabsl.Namespace, + Name: nabsl.Spec.Credential.Name, + }, sourceNaBSLSecret); err != nil { + logger.Error(err, "Failed to get secret", "secretName", nabsl.Spec.Credential.Name) + return false, err + } + + veleroObjectsNACUUID := nabsl.Status.VeleroBackupStorageLocation.NACUUID + + veleroBslSecret, err := function.GetBslSecretByLabel(ctx, r.Client, r.OADPNamespace, veleroObjectsNACUUID) + + if err != nil { + logger.Error(err, findSingleVBSLSecretError, constant.UUIDString, veleroObjectsNACUUID) + return false, err + } + + if veleroBslSecret == nil { + logger.Info("Velero BSL Secret with label not found, creating one", "oadpnamespace", 
r.OADPNamespace, constant.UUIDString, veleroObjectsNACUUID) + + veleroBslSecret = builder.ForSecret(r.OADPNamespace, veleroObjectsNACUUID). + ObjectMeta( + builder.WithLabels( + constant.NabslOriginNACUUIDLabel, veleroObjectsNACUUID, + ), + builder.WithLabelsMap(function.GetNonAdminLabels()), + builder.WithAnnotationsMap(function.GetNonAdminBackupStorageLocationAnnotations(nabsl.ObjectMeta)), + ).Result() + } + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, veleroBslSecret, func() error { + // Do not Sync additional labels and annotations from source secret + // This could lead to unexpected behavior if the user specifies + // nac specific labels or annotations on the source secret + + // Sync secret data + veleroBslSecret.Type = sourceNaBSLSecret.Type + veleroBslSecret.Data = make(map[string][]byte) + for k, v := range sourceNaBSLSecret.Data { + veleroBslSecret.Data[k] = v + } + return nil + }) + + if err != nil { + logger.Error(err, "Failed to sync secret to OADP namespace") + updatedCondition := meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionSecretSynced), + Status: metav1.ConditionFalse, + Reason: "SecretSyncFailed", + Message: "Failed to sync secret to OADP namespace", + }) + if updatedCondition { + if updateErr := r.Status().Update(ctx, nabsl); updateErr != nil { + logger.Error(updateErr, failedUpdateStatusError) + return false, updateErr + } + } + return false, err + } + + secretSyncedCondition := false + + switch op { + case controllerutil.OperationResultCreated: + logger.V(1).Info("VeleroBackupStorageLocation secret created successfully", + constant.NamespaceString, veleroBslSecret.Namespace, + constant.NameString, veleroBslSecret.Name) + // Use case where secret was removed from OADP instance and needs to be re-created + meta.RemoveStatusCondition(&nabsl.Status.Conditions, string(nacv1alpha1.NonAdminBSLConditionSecretSynced)) + secretSyncedCondition = 
meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionSecretSynced), + Status: metav1.ConditionTrue, + Reason: "SecretCreated", + Message: "Secret successfully created in the OADP namespace", + }) + case controllerutil.OperationResultUpdated: + logger.V(1).Info("VeleroBackupStorageLocation secret updated successfully", + constant.NamespaceString, veleroBslSecret.Namespace, + constant.NameString, veleroBslSecret.Name) + // Ensure last transition time is correctly showing last update + meta.RemoveStatusCondition(&nabsl.Status.Conditions, string(nacv1alpha1.NonAdminBSLConditionSecretSynced)) + secretSyncedCondition = meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionSecretSynced), + Status: metav1.ConditionTrue, + Reason: "SecretUpdated", + Message: "Secret successfully updated in the OADP namespace", + }) + case controllerutil.OperationResultNone: + logger.V(1).Info("VeleroBackupStorageLocation secret unchanged", + constant.NamespaceString, veleroBslSecret.Namespace, + constant.NameString, veleroBslSecret.Name) + } + + if secretSyncedCondition { + if updateErr := r.Status().Update(ctx, nabsl); updateErr != nil { + logger.Error(updateErr, failedUpdateStatusError) + return false, updateErr + } + } + + return false, nil +} + +// createVeleroBSL creates a VeleroBackupStorageLocation and syncs its status with NonAdminBackupStorageLocation +func (r *NonAdminBackupStorageLocationReconciler) createVeleroBSL(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + if nabsl.Status.VeleroBackupStorageLocation == nil || + nabsl.Status.VeleroBackupStorageLocation.NACUUID == constant.EmptyString { + return false, nil + } + + veleroObjectsNACUUID := nabsl.Status.VeleroBackupStorageLocation.NACUUID + + // Check if VeleroBackupStorageLocation already exists + veleroBsl, err := 
function.GetVeleroBackupStorageLocationByLabel(ctx, r.Client, r.OADPNamespace, veleroObjectsNACUUID) + if err != nil { + logger.Error(err, "Failed to get VeleroBackupStorageLocation", constant.UUIDString, veleroObjectsNACUUID) + return false, err + } + // Get the VeleroBackupStorageLocation secret to be used as the credential for the VeleroBackupStorageLocation + veleroBslSecret, err := function.GetBslSecretByLabel(ctx, r.Client, r.OADPNamespace, veleroObjectsNACUUID) + + if err != nil { + logger.Error(err, findSingleVBSLSecretError, constant.UUIDString, veleroObjectsNACUUID) + return false, err + } + + if veleroBslSecret == nil { + logger.Error(err, "Failed to get VeleroBackupStorageLocation secret", constant.UUIDString, veleroObjectsNACUUID) + return false, err + } + + // Create VeleroBackupStorageLocation + if veleroBsl == nil { + logger.Info("Velero BSL with label not found, creating one", "oadpnamespace", r.OADPNamespace, constant.UUIDString, veleroObjectsNACUUID) + + veleroBsl = builder.ForBackupStorageLocation(r.OADPNamespace, veleroObjectsNACUUID). 
+ ObjectMeta( + builder.WithLabels( + constant.NabslOriginNACUUIDLabel, veleroObjectsNACUUID, + ), + builder.WithLabelsMap(function.GetNonAdminLabels()), + builder.WithAnnotationsMap(function.GetNonAdminBackupStorageLocationAnnotations(nabsl.ObjectMeta)), + ).Result() + } + + // We use Credential from the secret created in the createSyncNaBSLSecrets function + // however we need to set the key to the one specified in the NonAdminBackupStorageLocation spec + // because it's the user who decides which key to use from the secret + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, veleroBsl, func() error { + veleroBsl.Spec.AccessMode = nabsl.Spec.AccessMode + veleroBsl.Spec.BackupSyncPeriod = nabsl.Spec.BackupSyncPeriod + veleroBsl.Spec.Config = nabsl.Spec.Config + veleroBsl.Spec.Credential = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: veleroBslSecret.Name, + }, + } + veleroBsl.Spec.Credential.Key = nabsl.Spec.Credential.Key + veleroBsl.Spec.Default = nabsl.Spec.Default + veleroBsl.Spec.ObjectStorage = nabsl.Spec.ObjectStorage + veleroBsl.Spec.Provider = nabsl.Spec.Provider + veleroBsl.Spec.ValidationFrequency = nabsl.Spec.ValidationFrequency + + return nil + }) + + bslCondition := false + + // If there's an error, set the BSLSynced condition to false + if err != nil { + logger.Error(err, "VeleroBackupStorageLocation sync failure", "operation", op, constant.UUIDString, veleroObjectsNACUUID, constant.NamespaceString, veleroBsl.Namespace, constant.NameString, veleroBsl.Name) + meta.RemoveStatusCondition(&nabsl.Status.Conditions, string(nacv1alpha1.NonAdminBSLConditionBSLSynced)) + bslCondition = meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionBSLSynced), + Status: metav1.ConditionFalse, + Reason: "BackupStorageLocationSyncError", + Message: "BackupStorageLocation failure during sync", + }) + if bslCondition { + if updateErr := r.Status().Update(ctx, 
nabsl); updateErr != nil { + logger.Error(updateErr, failedUpdateStatusError) + // We don't return the error here because we are interested from the + // VeleroBackupStorageLocation sync status error + } + } + return false, err + } + + // Log different messages based on the operation performed + switch op { + case controllerutil.OperationResultCreated: + logger.V(1).Info("VeleroBackupStorageLocation created successfully", + constant.NamespaceString, veleroBsl.Namespace, + constant.NameString, veleroBsl.Name) + // Remove condition to ensure update time is not the one from the first + // BSLCreated condition occurrence. Use case where BSL was removed from the + // OADP namespace and needs to be re-created. + meta.RemoveStatusCondition(&nabsl.Status.Conditions, string(nacv1alpha1.NonAdminBSLConditionBSLSynced)) + bslCondition = meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionBSLSynced), + Status: metav1.ConditionTrue, + Reason: "BackupStorageLocationCreated", + Message: "BackupStorageLocation successfully created in the OADP namespace", + }) + case controllerutil.OperationResultUpdated: + logger.V(1).Info("VeleroBackupStorageLocation updated successfully", + constant.NamespaceString, veleroBsl.Namespace, + constant.NameString, veleroBsl.Name) + // Remove condition to ensure update time is not the one from the first + // BSLUpdated condition occurrence + meta.RemoveStatusCondition(&nabsl.Status.Conditions, string(nacv1alpha1.NonAdminBSLConditionBSLSynced)) + bslCondition = meta.SetStatusCondition(&nabsl.Status.Conditions, metav1.Condition{ + Type: string(nacv1alpha1.NonAdminBSLConditionBSLSynced), + Status: metav1.ConditionTrue, + Reason: "BackupStorageLocationUpdated", + Message: "BackupStorageLocation successfully updated in the OADP namespace", + }) + case controllerutil.OperationResultNone: + logger.V(1).Info("VeleroBackupStorageLocation unchanged", + constant.NamespaceString, veleroBsl.Namespace, + 
constant.NameString, veleroBsl.Name) + } + updatedPhase := updateNaBSLPhase(&nabsl.Status.Phase, nacv1alpha1.NaBSLPhaseCreated) + + if bslCondition || updatedPhase { + if updateErr := r.Status().Update(ctx, nabsl); updateErr != nil { + logger.Error(updateErr, failedUpdateStatusError) + return false, updateErr + } + } + + return false, nil +} + +// syncVeleroBSLWithNaBSL keeps the NonAdminBackupStorageLocation status in sync with the status of its associated VeleroBackupStorageLocation +func (r *NonAdminBackupStorageLocationReconciler) syncVeleroBSLWithNaBSL(ctx context.Context, logger logr.Logger, nabsl *nacv1alpha1.NonAdminBackupStorageLocation) (bool, error) { + veleroObjectsNACUUID := nabsl.Status.VeleroBackupStorageLocation.NACUUID + + // Check if VeleroBackupStorageLocation already exists + veleroBsl, err := function.GetVeleroBackupStorageLocationByLabel(ctx, r.Client, r.OADPNamespace, veleroObjectsNACUUID) + if err != nil { + logger.Error(err, "Failed to get VeleroBackupStorageLocation", constant.UUIDString, veleroObjectsNACUUID) + return false, err + } + + // Ensure that the NonAdminBackupStorageLocation's Status is in sync + // with the VeleroBackupStorageLocation. Any required updates to the + // NonAdminBackupStorageLocation Status will be applied based on the + // current state of the VeleroBackupStorageLocation. + updated := updateNaBSLVeleroBackupStorageLocationStatus(&nabsl.Status, veleroBsl) + if updated { + if err := r.Status().Update(ctx, nabsl); err != nil { + logger.Error(err, "Failed to update NonAdminBackupStorageLocation Status after VeleroBackupStorageLocation reconciliation") + return false, err + } + logger.V(1).Info("NonAdminBackupStorageLocation Status updated successfully") + } else { + logger.V(1).Info("NonAdminBackup Status unchanged") + } + + return false, nil +} + +// updateNaBSLVeleroBackupStorageLocationStatus sets the VeleroBackupStorageLocation status field in NonAdminBackupStorageLocation object status and returns true +// if the VeleroBackupStorageLocation fields are changed by this call.
+func updateNaBSLVeleroBackupStorageLocationStatus(status *nacv1alpha1.NonAdminBackupStorageLocationStatus, veleroBackupStorageLocation *velerov1.BackupStorageLocation) bool { + if status == nil || veleroBackupStorageLocation == nil { + return false + } + if status.VeleroBackupStorageLocation == nil { + status.VeleroBackupStorageLocation = &nacv1alpha1.VeleroBackupStorageLocation{} + } + + // Treat nil as equivalent to a zero-value struct + currentStatus := velerov1.BackupStorageLocationStatus{} + if status.VeleroBackupStorageLocation.Status != nil { + currentStatus = *status.VeleroBackupStorageLocation.Status + } + + // Return false if both statuses are equivalent + if reflect.DeepEqual(currentStatus, veleroBackupStorageLocation.Status) { + return false + } + + // Update and return true if they differ + status.VeleroBackupStorageLocation.Status = veleroBackupStorageLocation.Status.DeepCopy() + return true +} + +// updateNaBSLPhase updates the phase of the NonAdminBackupStorageLocation +func updateNaBSLPhase(phase *nacv1alpha1.NonAdminBackupStorageLocationPhase, newPhase nacv1alpha1.NonAdminBackupStorageLocationPhase) bool { + // Ensure phase is valid + if newPhase == constant.EmptyString { + return false + } + + if *phase == newPhase { + return false + } + + *phase = newPhase + return true +} diff --git a/internal/controller/nonadminbackupstoragelocation_controller_test.go b/internal/controller/nonadminbackupstoragelocation_controller_test.go deleted file mode 100644 index ff541d6..0000000 --- a/internal/controller/nonadminbackupstoragelocation_controller_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2024. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" -) - -var _ = Describe("NonAdminBackupStorageLocation Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" - - ctx := context.Background() - - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - nonadminbackupstoragelocation := &nacv1alpha1.NonAdminBackupStorageLocation{} - - BeforeEach(func() { - By("creating the custom resource for the Kind NonAdminBackupStorageLocation") - err := k8sClient.Get(ctx, typeNamespacedName, nonadminbackupstoragelocation) - if err != nil && errors.IsNotFound(err) { - resource := &nacv1alpha1.NonAdminBackupStorageLocation{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } - }) - - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &nacv1alpha1.NonAdminBackupStorageLocation{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - By("Cleanup the specific resource instance NonAdminBackupStorageLocation") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &NonAdminBackupStorageLocationReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. - }) - }) -}) diff --git a/internal/handler/velerobackupstoragelocation_handler.go b/internal/handler/velerobackupstoragelocation_handler.go new file mode 100644 index 0000000..bdac2bc --- /dev/null +++ b/internal/handler/velerobackupstoragelocation_handler.go @@ -0,0 +1,63 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package handler contains all event handlers of the project +package handler + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/migtools/oadp-non-admin/internal/common/constant" + "github.com/migtools/oadp-non-admin/internal/common/function" +) + +// VeleroBackupStorageLocationHandler contains event handlers for Velero BackupStorageLocation objects +type VeleroBackupStorageLocationHandler struct{} + +// Create event handler +func (VeleroBackupStorageLocationHandler) Create(_ context.Context, _ event.CreateEvent, _ workqueue.RateLimitingInterface) { + // Create event handler for the BackupStorageLocation object +} + +// Update event handler adds Velero BackupStorageLocation's NonAdminBackupStorageLocation to controller queue +func (VeleroBackupStorageLocationHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + logger := function.GetLogger(ctx, evt.ObjectNew, "VeleroBackupStorageLocationHandler") + + annotations := evt.ObjectNew.GetAnnotations() + nabslOriginNamespace := annotations[constant.NabslOriginNamespaceAnnotation] + nabslOriginName := annotations[constant.NabslOriginNameAnnotation] + + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: nabslOriginName, + Namespace: nabslOriginNamespace, + }}) + logger.V(1).Info("Handled Update event") +} + +// Delete event handler +func (VeleroBackupStorageLocationHandler) Delete(_ context.Context, _ event.DeleteEvent, _ workqueue.RateLimitingInterface) { + // Delete event handler for the BackupStorageLocation object +} + +// Generic event handler +func (VeleroBackupStorageLocationHandler) Generic(_ context.Context, _ event.GenericEvent, _ workqueue.RateLimitingInterface) { + // Generic event handler for the BackupStorageLocation object +} diff --git a/internal/predicate/composite_nabsl_predicate.go 
b/internal/predicate/composite_nabsl_predicate.go new file mode 100644 index 0000000..806d1b2 --- /dev/null +++ b/internal/predicate/composite_nabsl_predicate.go @@ -0,0 +1,71 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package predicate contains all event filters of the project +package predicate + +import ( + "context" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + + nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" +) + +// CompositeNaBSLPredicate is a combination of NonAdminBackupStorageLocation and Velero BackupStorageLocation event filters +type CompositeNaBSLPredicate struct { + Context context.Context + NonAdminBackupStorageLocationPredicate NonAdminBackupStorageLocationPredicate + VeleroBackupStorageLocationPredicate VeleroBackupStorageLocationPredicate +} + +// Create event filter only accepts NonAdminBackupStorageLocation create events +func (p CompositeNaBSLPredicate) Create(evt event.CreateEvent) bool { + switch evt.Object.(type) { + case *nacv1alpha1.NonAdminBackupStorageLocation: + return p.NonAdminBackupStorageLocationPredicate.Create(p.Context, evt) + default: + return false + } +} + +// Update event filter accepts both NonAdminBackupStorageLocation and Velero BackupStorageLocation update events +func (p CompositeNaBSLPredicate) Update(evt event.UpdateEvent) bool { + switch evt.ObjectNew.(type) { + case *nacv1alpha1.NonAdminBackupStorageLocation: + return 
p.NonAdminBackupStorageLocationPredicate.Update(p.Context, evt) + case *velerov1.BackupStorageLocation: + return p.VeleroBackupStorageLocationPredicate.Update(p.Context, evt) + default: + return false + } +} + +// Delete event filter only accepts NonAdminBackupStorageLocation delete events +func (p CompositeNaBSLPredicate) Delete(evt event.DeleteEvent) bool { + switch evt.Object.(type) { + case *nacv1alpha1.NonAdminBackupStorageLocation: + return p.NonAdminBackupStorageLocationPredicate.Delete(p.Context, evt) + default: + return false + } +} + +// Generic event filter does not accept any generic events +func (CompositeNaBSLPredicate) Generic(_ event.GenericEvent) bool { + return false +} diff --git a/internal/predicate/nonadminbackupstoragelocation_predicate.go b/internal/predicate/nonadminbackupstoragelocation_predicate.go new file mode 100644 index 0000000..d4f98e7 --- /dev/null +++ b/internal/predicate/nonadminbackupstoragelocation_predicate.go @@ -0,0 +1,58 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package predicate + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/migtools/oadp-non-admin/internal/common/function" +) + +const nonAdminBackupStorageLocationPredicateKey = "NonAdminBackupStorageLocationPredicate" + +// NonAdminBackupStorageLocationPredicate contains event filters for Non Admin Backup Storage Location objects +type NonAdminBackupStorageLocationPredicate struct{} + +// Create event filter accepts all NonAdminBackupStorageLocation create events +func (NonAdminBackupStorageLocationPredicate) Create(ctx context.Context, evt event.CreateEvent) bool { + logger := function.GetLogger(ctx, evt.Object, nonAdminBackupStorageLocationPredicateKey) + logger.V(1).Info("Accepted Create event") + return true +} + +// Update event filter only accepts NonAdminBackupStorageLocation update events that include spec change +func (NonAdminBackupStorageLocationPredicate) Update(ctx context.Context, evt event.UpdateEvent) bool { + logger := function.GetLogger(ctx, evt.ObjectNew, nonAdminBackupStorageLocationPredicateKey) + + // spec change + if evt.ObjectNew.GetGeneration() != evt.ObjectOld.GetGeneration() { + logger.V(1).Info("Accepted Update event") + return true + } + + logger.V(1).Info("Rejected Update event") + return false +} + +// Delete event filter accepts all NonAdminBackupStorageLocation delete events +func (NonAdminBackupStorageLocationPredicate) Delete(ctx context.Context, evt event.DeleteEvent) bool { + logger := function.GetLogger(ctx, evt.Object, nonAdminBackupStorageLocationPredicateKey) + logger.V(1).Info("Accepted Delete event") + return true +} diff --git a/internal/predicate/velerobackupstoragelocation_predicate.go b/internal/predicate/velerobackupstoragelocation_predicate.go new file mode 100644 index 0000000..1daeccf --- /dev/null +++ b/internal/predicate/velerobackupstoragelocation_predicate.go @@ -0,0 +1,47 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/migtools/oadp-non-admin/internal/common/function" +) + +// VeleroBackupStorageLocationPredicate contains event filters for Velero BackupStorageLocation objects +type VeleroBackupStorageLocationPredicate struct { + OADPNamespace string +} + +// Update event filter only accepts Velero Backup update events from OADP namespace +// and from Velero Backups that have required metadata +func (p VeleroBackupStorageLocationPredicate) Update(ctx context.Context, evt event.UpdateEvent) bool { + logger := function.GetLogger(ctx, evt.ObjectNew, "VeleroBackupStorageLocationPredicate") + + namespace := evt.ObjectNew.GetNamespace() + if namespace == p.OADPNamespace { + if function.CheckVeleroBackupStorageLocationMetadata(evt.ObjectNew) { + logger.V(1).Info("Accepted BackupStorageLocation Update event") + return true + } + } + + logger.V(1).Info("Rejected Update event") + return false +}