diff --git a/apis/clusterresources/v1beta1/awssecuritygroupfirewallrule_types.go b/apis/clusterresources/v1beta1/awssecuritygroupfirewallrule_types.go
index 063af3fee..ab6aee06e 100644
--- a/apis/clusterresources/v1beta1/awssecuritygroupfirewallrule_types.go
+++ b/apis/clusterresources/v1beta1/awssecuritygroupfirewallrule_types.go
@@ -65,6 +65,15 @@ func (fr *AWSSecurityGroupFirewallRule) NewPatch() client.Patch {
 	return client.MergeFrom(old)
 }
 
+func (fr *AWSSecurityGroupFirewallRule) AttachToCluster(id string) {
+	fr.Status.ClusterID = id
+	fr.Status.ResourceState = models.CreatingEvent
+}
+
+func (fr *AWSSecurityGroupFirewallRule) DetachFromCluster() {
+	fr.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&AWSSecurityGroupFirewallRule{}, &AWSSecurityGroupFirewallRuleList{})
 }
diff --git a/apis/clusterresources/v1beta1/awsvpcpeering_types.go b/apis/clusterresources/v1beta1/awsvpcpeering_types.go
index bc9083fb9..8dc91587f 100644
--- a/apis/clusterresources/v1beta1/awsvpcpeering_types.go
+++ b/apis/clusterresources/v1beta1/awsvpcpeering_types.go
@@ -71,13 +71,21 @@ func (aws *AWSVPCPeering) NewPatch() client.Patch {
 	return client.MergeFrom(old)
 }
 
+func (aws *AWSVPCPeering) AttachToCluster(id string) {
+	aws.Status.CDCID = id
+	aws.Status.ResourceState = models.CreatingEvent
+}
+
+func (aws *AWSVPCPeering) DetachFromCluster() {
+	aws.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&AWSVPCPeering{}, &AWSVPCPeeringList{})
 }
 
 type immutableAWSVPCPeeringFields struct {
 	specificFields specificAWSVPCPeeringFields
-	peering        immutablePeeringFields
 }
 
 type specificAWSVPCPeeringFields struct {
@@ -91,9 +99,6 @@ func (aws *AWSVPCPeeringSpec) newImmutableFields() *immutableAWSVPCPeeringFields
 			peerAWSAccountID: aws.PeerAWSAccountID,
 			peerRegion:       aws.PeerRegion,
 		},
-		immutablePeeringFields{
-			DataCentreID: aws.DataCentreID,
-		},
 	}
 }
 
@@ -106,8 +111,7 @@ func (aws *AWSVPCPeeringSpec) ValidateUpdate(oldSpec AWSVPCPeeringSpec) error {
 		return err
 	}
 
-	if newImmutableFields.peering != oldImmutableFields.peering ||
-		newImmutableFields.specificFields != oldImmutableFields.specificFields {
+	if newImmutableFields.specificFields != oldImmutableFields.specificFields {
 		return fmt.Errorf("cannot update immutable fields: old spec: %+v: new spec: %+v", oldSpec, aws)
 	}
 
@@ -125,11 +129,6 @@ func (aws *AWSVPCPeeringSpec) Validate(availableRegions []string) error {
 		return fmt.Errorf("VPC ID must begin with 'vpc-' and fit pattern: %s. %v", models.PeerVPCIDRegExp, err)
 	}
 
-	dataCentreIDMatched, err := regexp.Match(models.UUIDStringRegExp, []byte(aws.DataCentreID))
-	if !dataCentreIDMatched || err != nil {
-		return fmt.Errorf("data centre ID is a UUID formated string. It must fit the pattern: %s. %v", models.UUIDStringRegExp, err)
-	}
-
 	if !validation.Contains(aws.PeerRegion, availableRegions) {
 		return fmt.Errorf("AWS Region to peer: %s is unavailable, available regions: %v",
 			aws.PeerRegion, availableRegions)
diff --git a/apis/clusterresources/v1beta1/awsvpcpeering_webhook.go b/apis/clusterresources/v1beta1/awsvpcpeering_webhook.go
index edda260c7..af23491a3 100644
--- a/apis/clusterresources/v1beta1/awsvpcpeering_webhook.go
+++ b/apis/clusterresources/v1beta1/awsvpcpeering_webhook.go
@@ -67,10 +67,6 @@ func (r *AWSVPCPeering) ValidateCreate() error {
 		return fmt.Errorf("peer AWS Account Region is empty")
 	}
 
-	if r.Spec.DataCentreID == "" {
-		return fmt.Errorf("dataCentre ID is empty")
-	}
-
 	if r.Spec.PeerSubnets == nil {
 		return fmt.Errorf("peer Subnets list is empty")
 	}
diff --git a/apis/clusterresources/v1beta1/azurevnetpeering_types.go b/apis/clusterresources/v1beta1/azurevnetpeering_types.go
index 4de680fb0..272b1febb 100644
--- a/apis/clusterresources/v1beta1/azurevnetpeering_types.go
+++ b/apis/clusterresources/v1beta1/azurevnetpeering_types.go
@@ -71,19 +71,20 @@ func (azure *AzureVNetPeering) NewPatch() client.Patch {
 	return client.MergeFrom(old)
 }
 
+func (azure *AzureVNetPeering) AttachToCluster(id string) {
+	azure.Status.CDCID = id
+	azure.Status.ResourceState = models.CreatingEvent
+}
+
+func (azure *AzureVNetPeering) DetachFromCluster() {
+	azure.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&AzureVNetPeering{}, &AzureVNetPeeringList{})
 }
 
 func (azure *AzureVNetPeeringSpec) Validate() error {
-	dataCentreIDMatched, err := regexp.Match(models.UUIDStringRegExp, []byte(azure.DataCentreID))
-	if err != nil {
-		return err
-	}
-	if !dataCentreIDMatched {
-		return fmt.Errorf("data centre ID is a UUID formated string. It must fit the pattern: %s", models.UUIDStringRegExp)
-	}
-
 	for _, subnet := range azure.PeerSubnets {
 		peerSubnetMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(subnet))
 		if err != nil {
diff --git a/apis/clusterresources/v1beta1/azurevnetpeering_webhook.go b/apis/clusterresources/v1beta1/azurevnetpeering_webhook.go
index 39ab9c7e3..7be21f7a1 100644
--- a/apis/clusterresources/v1beta1/azurevnetpeering_webhook.go
+++ b/apis/clusterresources/v1beta1/azurevnetpeering_webhook.go
@@ -71,10 +71,6 @@ func (r *AzureVNetPeering) ValidateCreate() error {
 		return fmt.Errorf("peer Subscription ID is empty")
 	}
 
-	if r.Spec.DataCentreID == "" {
-		return fmt.Errorf("dataCentre ID is empty")
-	}
-
 	if r.Spec.PeerSubnets == nil {
 		return fmt.Errorf("peer Subnets list is empty")
 	}
diff --git a/apis/clusterresources/v1beta1/clusterbackup_types.go b/apis/clusterresources/v1beta1/clusterbackup_types.go
index be942299d..e6695fb4a 100644
--- a/apis/clusterresources/v1beta1/clusterbackup_types.go
+++ b/apis/clusterresources/v1beta1/clusterbackup_types.go
@@ -27,7 +27,6 @@ import (
 
 // ClusterBackupSpec defines the desired state of ClusterBackup
 type ClusterBackupSpec struct {
-	ClusterID   string `json:"clusterId"`
 	ClusterKind string `json:"clusterKind"`
 }
 
@@ -37,6 +36,7 @@ type ClusterBackupStatus struct {
 	Progress  string `json:"progress,omitempty"`
 	Start     int    `json:"start,omitempty"`
 	End       int    `json:"end,omitempty"`
+	ClusterID string `json:"clusterId,omitempty"`
 }
 
 //+kubebuilder:object:root=true
@@ -71,6 +71,14 @@ func (cbs *ClusterBackupStatus) UpdateStatus(instBackup *models.BackupEvent) {
 	cbs.Progress = fmt.Sprintf("%f", instBackup.Progress)
 }
 
+func (cb *ClusterBackup) AttachToCluster(id string) {
+	cb.Status.ClusterID = id
+}
+
+func (cb *ClusterBackup) DetachFromCluster() {
+
+}
+
 func init() {
 	SchemeBuilder.Register(&ClusterBackup{}, &ClusterBackupList{})
 }
diff --git a/apis/clusterresources/v1beta1/clusternetworkfirewallrule_types.go b/apis/clusterresources/v1beta1/clusternetworkfirewallrule_types.go
index af3573106..98e60e870 100644
--- a/apis/clusterresources/v1beta1/clusternetworkfirewallrule_types.go
+++ b/apis/clusterresources/v1beta1/clusternetworkfirewallrule_types.go
@@ -65,6 +65,15 @@ func (fr *ClusterNetworkFirewallRule) NewPatch() client.Patch {
 	return client.MergeFrom(old)
 }
 
+func (fr *ClusterNetworkFirewallRule) AttachToCluster(id string) {
+	fr.Status.ClusterID = id
+	fr.Status.ResourceState = models.CreatingEvent
+}
+
+func (fr *ClusterNetworkFirewallRule) DetachFromCluster() {
+	fr.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&ClusterNetworkFirewallRule{}, &ClusterNetworkFirewallRuleList{})
 }
diff --git a/apis/clusterresources/v1beta1/exclusionwindow_types.go b/apis/clusterresources/v1beta1/exclusionwindow_types.go
index c4b9c3b9f..723cca31d 100644
--- a/apis/clusterresources/v1beta1/exclusionwindow_types.go
+++ b/apis/clusterresources/v1beta1/exclusionwindow_types.go
@@ -19,11 +19,12 @@ package v1beta1
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/instaclustr/operator/pkg/models"
 )
 
 // ExclusionWindowSpec defines the desired state of ExclusionWindow
 type ExclusionWindowSpec struct {
-	ClusterID string `json:"clusterId"`
 	DayOfWeek string `json:"dayOfWeek"`
 	//+kubebuilder:validation:Minimum:=0
 	//+kubebuilder:validation:Maximum:=23
@@ -36,7 +37,9 @@ type ExclusionWindowSpec struct {
 
 // ExclusionWindowStatus defines the observed state of ExclusionWindow
 type ExclusionWindowStatus struct {
-	ID string `json:"id"`
+	ID            string `json:"id,omitempty"`
+	ClusterID     string `json:"clusterId,omitempty"`
+	ResourceState string `json:"resourceState,omitempty"`
 }
 
 //+kubebuilder:object:root=true
@@ -60,11 +63,20 @@ type ExclusionWindowList struct {
 	Items           []ExclusionWindow `json:"items"`
 }
 
+func (ew *ExclusionWindow) AttachToCluster(id string) {
+	ew.Status.ClusterID = id
+	ew.Status.ResourceState = models.CreatingEvent
+}
+
+func (ew *ExclusionWindow) DetachFromCluster() {
+	ew.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&ExclusionWindow{}, &ExclusionWindowList{})
 }
 
-func (r *ExclusionWindow) NewPatch() client.Patch {
-	old := r.DeepCopy()
+func (ew *ExclusionWindow) NewPatch() client.Patch {
+	old := ew.DeepCopy()
 	return client.MergeFrom(old)
 }
diff --git a/apis/clusterresources/v1beta1/gcpvpcpeering_types.go b/apis/clusterresources/v1beta1/gcpvpcpeering_types.go
index 64e41d2b9..40f3c1334 100644
--- a/apis/clusterresources/v1beta1/gcpvpcpeering_types.go
+++ b/apis/clusterresources/v1beta1/gcpvpcpeering_types.go
@@ -69,19 +69,20 @@ func (gcp *GCPVPCPeering) NewPatch() client.Patch {
 	return client.MergeFrom(old)
 }
 
+func (gcp *GCPVPCPeering) AttachToCluster(id string) {
+	gcp.Status.CDCID = id
+	gcp.Status.ResourceState = models.CreatingEvent
+}
+
+func (gcp *GCPVPCPeering) DetachFromCluster() {
+	gcp.Status.ResourceState = models.DeletingEvent
+}
+
 func init() {
 	SchemeBuilder.Register(&GCPVPCPeering{}, &GCPVPCPeeringList{})
 }
 
 func (gcp *GCPVPCPeeringSpec) Validate() error {
-	dataCentreIDMatched, err := regexp.Match(models.UUIDStringRegExp, []byte(gcp.DataCentreID))
-	if err != nil {
-		return err
-	}
-	if !dataCentreIDMatched {
-		return fmt.Errorf("data centre ID is a UUID formated string. It must fit the pattern: %s", models.UUIDStringRegExp)
-	}
-
 	for _, subnet := range gcp.PeerSubnets {
 		peerSubnetMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(subnet))
 		if err != nil {
diff --git a/apis/clusterresources/v1beta1/gcpvpcpeering_webhook.go b/apis/clusterresources/v1beta1/gcpvpcpeering_webhook.go
index 72c5d2ce3..2c2cd026d 100644
--- a/apis/clusterresources/v1beta1/gcpvpcpeering_webhook.go
+++ b/apis/clusterresources/v1beta1/gcpvpcpeering_webhook.go
@@ -67,10 +67,6 @@ func (r *GCPVPCPeering) ValidateCreate() error {
 		return fmt.Errorf("peer Project ID is empty")
 	}
 
-	if r.Spec.DataCentreID == "" {
-		return fmt.Errorf("dataCentre ID is empty")
-	}
-
 	if r.Spec.PeerSubnets == nil {
 		return fmt.Errorf("peer Subnets list is empty")
 	}
diff --git a/apis/clusterresources/v1beta1/maintenanceevents_types.go b/apis/clusterresources/v1beta1/maintenanceevents_types.go
index 819552baa..3190748de 100644
--- a/apis/clusterresources/v1beta1/maintenanceevents_types.go
+++ b/apis/clusterresources/v1beta1/maintenanceevents_types.go
@@ -23,7 +23,6 @@ import (
 
 // MaintenanceEventsSpec defines the desired state of MaintenanceEvents
 type MaintenanceEventsSpec struct {
-	ClusterID                    string                        `json:"clusterId"`
 	MaintenanceEventsReschedules []*MaintenanceEventReschedule `json:"maintenanceEventsReschedule"`
 }
 
diff --git a/apis/clusterresources/v1beta1/structs.go b/apis/clusterresources/v1beta1/structs.go
index 0670d6da4..bc4bcdcaf 100644
--- a/apis/clusterresources/v1beta1/structs.go
+++ b/apis/clusterresources/v1beta1/structs.go
@@ -21,8 +21,7 @@ import (
 )
 
 type VPCPeeringSpec struct {
-	DataCentreID string   `json:"cdcId"`
-	PeerSubnets  []string `json:"peerSubnets"`
+	PeerSubnets []string `json:"peerSubnets"`
 }
 
 type PeeringStatus struct {
@@ -30,6 +29,8 @@ type PeeringStatus struct {
 	StatusCode    string `json:"statusCode,omitempty"`
 	Name          string `json:"name,omitempty"`
 	FailureReason string `json:"failureReason,omitempty"`
+	CDCID         string `json:"cdcid,omitempty"`
+	ResourceState string `json:"resourceState,omitempty"`
 }
 
 type PatchRequest struct {
@@ -39,18 +40,15 @@ type PatchRequest struct {
 }
 
 type FirewallRuleSpec struct {
-	ClusterID string `json:"clusterId"`
-	Type      string `json:"type"`
+	Type string `json:"type"`
 }
 
 type FirewallRuleStatus struct {
 	ID             string `json:"id,omitempty"`
 	DeferredReason string `json:"deferredReason,omitempty"`
 	Status         string `json:"status,omitempty"`
-}
-
-type immutablePeeringFields struct {
-	DataCentreID string
+	ClusterID      string `json:"clusterId,omitempty"`
+	ResourceState  string `json:"resourceState,omitempty"`
 }
 
 type SecretReference struct {
diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go
index f3b394f27..b065322ff 100644
--- a/apis/clusters/v1beta1/cadence_types.go
+++ b/apis/clusters/v1beta1/cadence_types.go
@@ -73,6 +73,7 @@ type CadenceSpec struct {
 	PackagedProvisioning []*PackagedProvisioning `json:"packagedProvisioning,omitempty"`
 	TargetPrimaryCadence []*TargetCadence        `json:"targetPrimaryCadence,omitempty"`
 	ResizeSettings       []*ResizeSettings       `json:"resizeSettings,omitempty"`
+	ClusterResources     ClusterResourceRefs     `json:"clusterResources,omitempty"`
 }
 
 type AWSArchival struct {
diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go
index cd19c9374..4ceab739a 100644
--- a/apis/clusters/v1beta1/cassandra_types.go
+++ b/apis/clusters/v1beta1/cassandra_types.go
@@ -63,6 +63,7 @@ type CassandraSpec struct {
 	Spark            []*Spark            `json:"spark,omitempty"`
 	BundledUseOnly   bool                `json:"bundledUseOnly,omitempty"`
 	UserRefs         References          `json:"userRefs,omitempty"`
+	ClusterResources ClusterResourceRefs `json:"clusterResources,omitempty"`
 	//+kubebuilder:validate:MaxItems:=1
 	ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty"`
 }
@@ -152,7 +153,6 @@ func (c *Cassandra) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1.C
 			Finalizers: []string{models.DeletionFinalizer},
 		},
 		Spec: clusterresourcesv1beta1.ClusterBackupSpec{
-			ClusterID:   c.Status.ID,
 			ClusterKind: models.CassandraClusterKind,
 		},
 	}
diff --git a/apis/clusters/v1beta1/kafka_types.go b/apis/clusters/v1beta1/kafka_types.go
index d4c51261c..1debe5122 100644
--- a/apis/clusters/v1beta1/kafka_types.go
+++ b/apis/clusters/v1beta1/kafka_types.go
@@ -89,6 +89,7 @@ type KafkaSpec struct {
 	KarapaceSchemaRegistry []*KarapaceSchemaRegistry `json:"karapaceSchemaRegistry,omitempty"`
 	BundledUseOnly         bool                      `json:"bundledUseOnly,omitempty"`
 	UserRefs               References                `json:"userRefs,omitempty"`
+	ClusterResources       ClusterResourceRefs       `json:"clusterResources,omitempty"`
 	Kraft                  []*Kraft                  `json:"kraft,omitempty"`
 }
 
diff --git a/apis/clusters/v1beta1/kafkaconnect_types.go b/apis/clusters/v1beta1/kafkaconnect_types.go
index a101ee454..59466dfcc 100644
--- a/apis/clusters/v1beta1/kafkaconnect_types.go
+++ b/apis/clusters/v1beta1/kafkaconnect_types.go
@@ -111,6 +111,7 @@ type KafkaConnectSpec struct {
 	// CustomConnectors defines the location for custom connector storage and access info.
 	CustomConnectors []*CustomConnectors `json:"customConnectors,omitempty"`
+	ClusterResources ClusterResourceRefs `json:"clusterResources,omitempty"`
 }
 
 // KafkaConnectStatus defines the observed state of KafkaConnect
diff --git a/apis/clusters/v1beta1/opensearch_types.go b/apis/clusters/v1beta1/opensearch_types.go
index db983fbe0..3de1b3a1e 100644
--- a/apis/clusters/v1beta1/opensearch_types.go
+++ b/apis/clusters/v1beta1/opensearch_types.go
@@ -54,6 +54,7 @@ type OpenSearchSpec struct {
 	AlertingPlugin   bool                `json:"alertingPlugin,omitempty"`
 	BundledUseOnly   bool                `json:"bundledUseOnly,omitempty"`
 	UserRefs         References          `json:"userRefs,omitempty"`
+	ClusterResources ClusterResourceRefs `json:"clusterResources,omitempty"`
 	//+kubuilder:validation:MaxItems:=1
 	ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty"`
 	//+kubuilder:validation:MaxItems:=1
@@ -584,7 +585,6 @@ func (os *OpenSearch) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1
 			Finalizers: []string{models.DeletionFinalizer},
 		},
 		Spec: clusterresourcesv1beta1.ClusterBackupSpec{
-			ClusterID:   os.Status.ID,
 			ClusterKind: models.OsClusterKind,
 		},
 	}
diff --git a/apis/clusters/v1beta1/postgresql_types.go b/apis/clusters/v1beta1/postgresql_types.go
index bde017933..b71045fc4 100644
--- a/apis/clusters/v1beta1/postgresql_types.go
+++ b/apis/clusters/v1beta1/postgresql_types.go
@@ -75,10 +75,11 @@ type PgRestoreFrom struct {
 type PgSpec struct {
 	PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty"`
 	Cluster       `json:",inline"`
-	DataCentres           []*PgDataCentre   `json:"dataCentres,omitempty"`
-	ClusterConfigurations map[string]string `json:"clusterConfigurations,omitempty"`
-	SynchronousModeStrict bool              `json:"synchronousModeStrict,omitempty"`
-	UserRefs              []*Reference      `json:"userRefs,omitempty"`
+	DataCentres           []*PgDataCentre     `json:"dataCentres,omitempty"`
+	ClusterConfigurations map[string]string   `json:"clusterConfigurations,omitempty"`
+	SynchronousModeStrict bool                `json:"synchronousModeStrict,omitempty"`
+	UserRefs              []*Reference        `json:"userRefs,omitempty"`
+	ClusterResources      ClusterResourceRefs `json:"clusterResources,omitempty"`
 	//+kubebuilder:validate:MaxItems:=1
 	ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty"`
 }
@@ -159,7 +160,6 @@ func (pg *PostgreSQL) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1
 			Finalizers: []string{models.DeletionFinalizer},
 		},
 		Spec: clusterresourcesv1beta1.ClusterBackupSpec{
-			ClusterID:   pg.Status.ID,
 			ClusterKind: models.PgClusterKind,
 		},
 	}
diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go
index 02c1df4b4..d88691bbb 100644
--- a/apis/clusters/v1beta1/redis_types.go
+++ b/apis/clusters/v1beta1/redis_types.go
@@ -69,7 +69,8 @@ type RedisSpec struct {
 	//+kubebuilder:validation:MaxItems:=2
 	DataCentres []*RedisDataCentre `json:"dataCentres,omitempty"`
 
-	UserRefs References `json:"userRefs,omitempty"`
+	UserRefs         References          `json:"userRefs,omitempty"`
+	ClusterResources ClusterResourceRefs `json:"clusterResources,omitempty"`
 	//+kubebuilder:validation:MaxItems:=1
 	ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty"`
 }
@@ -143,7 +144,6 @@ func (r *Redis) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1.Clust
 			Finalizers: []string{models.DeletionFinalizer},
 		},
 		Spec: clusterresourcesv1beta1.ClusterBackupSpec{
-			ClusterID:   r.Status.ID,
 			ClusterKind: models.RedisClusterKind,
 		},
 	}
diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go
index f8444df55..dee74620a 100644
--- a/apis/clusters/v1beta1/structs.go
+++ b/apis/clusters/v1beta1/structs.go
@@ -45,6 +45,7 @@ type DataCentre struct {
 }
 
 type DataCentreStatus struct {
+	Name   string  `json:"name,omitempty"`
 	ID     string  `json:"id,omitempty"`
 	Status string  `json:"status,omitempty"`
 	Nodes  []*Node `json:"nodes,omitempty"`
@@ -558,6 +559,7 @@ func areClusteredMaintenanceEventStatusEqual(a, b *clusterresource.MaintenanceEv
 
 func (cs *ClusterStatus) DCFromInstAPI(iDC models.DataCentre) *DataCentreStatus {
 	return &DataCentreStatus{
+		Name:   iDC.Name,
 		ID:     iDC.ID,
 		Status: iDC.Status,
 		Nodes:  cs.NodesFromInstAPI(iDC.Nodes),
@@ -713,6 +715,20 @@ func (cs *ClusterStatus) PrivateLinkStatusesEqual(iStatus *ClusterStatus) bool {
 	return true
 }
 
+type ClusterResourceRefs struct {
+	ClusterBackups                []*ClusterResourceRef `json:"clusterBackups,omitempty"`
+	ClusterNetworkFirewallRules   []*ClusterResourceRef `json:"clusterNetworkFirewallRules,omitempty"`
+	AWSVPCPeerings                []*ClusterResourceRef `json:"awsVPCPeerings,omitempty"`
+	AWSSecurityGroupFirewallRules []*ClusterResourceRef `json:"awsSecurityGroupFirewallRules,omitempty"`
+	ExclusionWindows              []*ClusterResourceRef `json:"exclusionWindows,omitempty"`
+	GCPVPCPeerings                []*ClusterResourceRef `json:"gcpVPCPeerings,omitempty"`
+	AzureVNetPeerings             []*ClusterResourceRef `json:"azureVNetPeerings,omitempty"`
+}
+type ClusterResourceRef struct {
+	Reference      `json:",inline"`
+	DataCentreName string `json:"dataCentreName,omitempty"`
+}
+
 type Reference struct {
 	Name      string `json:"name"`
 	Namespace string `json:"namespace"`
diff --git a/apis/clusters/v1beta1/zookeeper_types.go b/apis/clusters/v1beta1/zookeeper_types.go
index 11576f109..8378fdd4a 100644
--- a/apis/clusters/v1beta1/zookeeper_types.go
+++ b/apis/clusters/v1beta1/zookeeper_types.go
@@ -36,8 +36,9 @@ type ZookeeperDataCentre struct {
 
 // ZookeeperSpec defines the desired state of Zookeeper
 type ZookeeperSpec struct {
-	Cluster     `json:",inline"`
-	DataCentres []*ZookeeperDataCentre `json:"dataCentres"`
+	Cluster          `json:",inline"`
+	DataCentres      []*ZookeeperDataCentre `json:"dataCentres"`
+	ClusterResources ClusterResourceRefs    `json:"clusterResources,omitempty"`
 }
 
 // ZookeeperStatus defines the observed state of Zookeeper
diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go
index a0cc86ca2..88797c243 100644
--- a/apis/clusters/v1beta1/zz_generated.deepcopy.go
+++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go
@@ -309,6 +309,7 @@ func (in *CadenceSpec) DeepCopyInto(out *CadenceSpec) {
 			}
 		}
 	}
+	in.ClusterResources.DeepCopyInto(&out.ClusterResources)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CadenceSpec.
@@ -491,6 +492,7 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) {
 			}
 		}
 	}
+	in.ClusterResources.DeepCopyInto(&out.ClusterResources)
 	if in.ResizeSettings != nil {
 		in, out := &in.ResizeSettings, &out.ResizeSettings
 		*out = make([]*ResizeSettings, len(*in))
@@ -597,6 +599,114 @@ func (in *ClusterManagerNodes) DeepCopy() *ClusterManagerNodes {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceRef) DeepCopyInto(out *ClusterResourceRef) {
+	*out = *in
+	out.Reference = in.Reference
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceRef.
+func (in *ClusterResourceRef) DeepCopy() *ClusterResourceRef { + if in == nil { + return nil + } + out := new(ClusterResourceRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceRefs) DeepCopyInto(out *ClusterResourceRefs) { + *out = *in + if in.ClusterBackups != nil { + in, out := &in.ClusterBackups, &out.ClusterBackups + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.ClusterNetworkFirewallRules != nil { + in, out := &in.ClusterNetworkFirewallRules, &out.ClusterNetworkFirewallRules + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.AWSVPCPeerings != nil { + in, out := &in.AWSVPCPeerings, &out.AWSVPCPeerings + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.AWSSecurityGroupFirewallRules != nil { + in, out := &in.AWSSecurityGroupFirewallRules, &out.AWSSecurityGroupFirewallRules + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.ExclusionWindows != nil { + in, out := &in.ExclusionWindows, &out.ExclusionWindows + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.GCPVPCPeerings != nil { + in, out := &in.GCPVPCPeerings, &out.GCPVPCPeerings + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } + if in.AzureVNetPeerings != nil { + in, out := &in.AzureVNetPeerings, &out.AzureVNetPeerings + *out = make([]*ClusterResourceRef, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ClusterResourceRef) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceRefs. +func (in *ClusterResourceRefs) DeepCopy() *ClusterResourceRefs { + if in == nil { + return nil + } + out := new(ClusterResourceRefs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = *in @@ -1030,6 +1140,7 @@ func (in *KafkaConnectSpec) DeepCopyInto(out *KafkaConnectSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConnectSpec. 
@@ -1209,6 +1320,7 @@ func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) if in.Kraft != nil { in, out := &in.Kraft, &out.Kraft *out = make([]*Kraft, len(*in)) @@ -1566,6 +1678,7 @@ func (in *OpenSearchSpec) DeepCopyInto(out *OpenSearchSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) if in.ResizeSettings != nil { in, out := &in.ResizeSettings, &out.ResizeSettings *out = make([]*ResizeSettings, len(*in)) @@ -1820,6 +1933,7 @@ func (in *PgSpec) DeepCopyInto(out *PgSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) if in.ResizeSettings != nil { in, out := &in.ResizeSettings, &out.ResizeSettings *out = make([]*ResizeSettings, len(*in)) @@ -2121,6 +2235,7 @@ func (in *RedisSpec) DeepCopyInto(out *RedisSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) if in.ResizeSettings != nil { in, out := &in.ResizeSettings, &out.ResizeSettings *out = make([]*ResizeSettings, len(*in)) @@ -2600,6 +2715,7 @@ func (in *ZookeeperSpec) DeepCopyInto(out *ZookeeperSpec) { } } } + in.ClusterResources.DeepCopyInto(&out.ClusterResources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperSpec. diff --git a/apis/kafkamanagement/v1beta1/kafkauser_webhook.go b/apis/kafkamanagement/v1beta1/kafkauser_webhook.go index a0c726618..b7bd420c9 100644 --- a/apis/kafkamanagement/v1beta1/kafkauser_webhook.go +++ b/apis/kafkamanagement/v1beta1/kafkauser_webhook.go @@ -69,6 +69,11 @@ func (r *KafkaUser) ValidateCreate() error { func (r *KafkaUser) ValidateUpdate(old runtime.Object) error { kafkauserlog.Info("validate update", "name", r.Name) + oldUser := old.(*KafkaUser) + if *r.Spec.SecretRef != *oldUser.Spec.SecretRef { + return models.ErrImmutableSecretRef + } + for _, request := range r.Spec.CertificateRequests { if request.CSR == "" { if request.Organization == "" || request.OrganizationalUnit == "" || request.Country == "" || request.CommonName == "" { diff --git a/config/crd/bases/clusterresources.instaclustr.com_awssecuritygroupfirewallrules.yaml b/config/crd/bases/clusterresources.instaclustr.com_awssecuritygroupfirewallrules.yaml index 66567ac8c..5659a82ce 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_awssecuritygroupfirewallrules.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_awssecuritygroupfirewallrules.yaml @@ -37,14 +37,11 @@ spec: description: AWSSecurityGroupFirewallRuleSpec defines the desired state of AWSSecurityGroupFirewallRule properties: - clusterId: - type: string securityGroupId: type: string type: type: string required: - - clusterId - securityGroupId - type type: object @@ -52,10 +49,14 @@ spec: description: AWSSecurityGroupFirewallRuleStatus defines the observed state of AWSSecurityGroupFirewallRule properties: + clusterId: + type: string deferredReason: type: string id: type: string + resourceState: + type: string status: type: string type: object diff --git a/config/crd/bases/clusterresources.instaclustr.com_awsvpcpeerings.yaml b/config/crd/bases/clusterresources.instaclustr.com_awsvpcpeerings.yaml index 4bde626a6..f701ff0be 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_awsvpcpeerings.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_awsvpcpeerings.yaml @@ -35,8 +35,6 @@ spec: spec: description: AWSVPCPeeringSpec defines the desired state of AWSVPCPeering properties: - cdcId: - type: string peerAwsAccountId: type: string peerRegion: @@ -48,7 +46,6 @@ spec: peerVpcId: type: 
string required: - - cdcId - peerAwsAccountId - peerSubnets - peerVpcId @@ -56,12 +53,16 @@ spec: status: description: AWSVPCPeeringStatus defines the observed state of AWSVPCPeering properties: + cdcid: + type: string failureReason: type: string id: type: string name: type: string + resourceState: + type: string statusCode: type: string type: object diff --git a/config/crd/bases/clusterresources.instaclustr.com_azurevnetpeerings.yaml b/config/crd/bases/clusterresources.instaclustr.com_azurevnetpeerings.yaml index b29ca2c93..e69c2aa33 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_azurevnetpeerings.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_azurevnetpeerings.yaml @@ -35,8 +35,6 @@ spec: spec: description: AzureVNetPeeringSpec defines the desired state of AzureVNetPeering properties: - cdcId: - type: string peerAdObjectId: type: string peerResourceGroup: @@ -50,7 +48,6 @@ spec: peerVirtualNetworkName: type: string required: - - cdcId - peerResourceGroup - peerSubnets - peerSubscriptionId @@ -59,12 +56,16 @@ spec: status: description: AzureVNetPeeringStatus defines the observed state of AzureVNetPeering properties: + cdcid: + type: string failureReason: type: string id: type: string name: type: string + resourceState: + type: string statusCode: type: string type: object diff --git a/config/crd/bases/clusterresources.instaclustr.com_clusterbackups.yaml b/config/crd/bases/clusterresources.instaclustr.com_clusterbackups.yaml index 8e9b8bc17..781f99348 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_clusterbackups.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_clusterbackups.yaml @@ -35,17 +35,16 @@ spec: spec: description: ClusterBackupSpec defines the desired state of ClusterBackup properties: - clusterId: - type: string clusterKind: type: string required: - - clusterId - clusterKind type: object status: description: ClusterBackupStatus defines the observed state of ClusterBackup properties: + clusterId: + type: string end: type: integer operationStatus: diff --git a/config/crd/bases/clusterresources.instaclustr.com_clusternetworkfirewallrules.yaml b/config/crd/bases/clusterresources.instaclustr.com_clusternetworkfirewallrules.yaml index 714e426fa..bee765e3c 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_clusternetworkfirewallrules.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_clusternetworkfirewallrules.yaml @@ -37,14 +37,11 @@ spec: description: ClusterNetworkFirewallRuleSpec defines the desired state of ClusterNetworkFirewallRule properties: - clusterId: - type: string network: type: string type: type: string required: - - clusterId - network - type type: object @@ -52,10 +49,14 @@ spec: description: ClusterNetworkFirewallRuleStatus defines the observed state of ClusterNetworkFirewallRule properties: + clusterId: + type: string deferredReason: type: string id: type: string + resourceState: + type: string status: type: string type: object diff --git a/config/crd/bases/clusterresources.instaclustr.com_exclusionwindows.yaml b/config/crd/bases/clusterresources.instaclustr.com_exclusionwindows.yaml index 81d6e8ab1..6991c6b5a 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_exclusionwindows.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_exclusionwindows.yaml @@ -35,8 +35,6 @@ spec: spec: description: ExclusionWindowSpec defines the desired state of ExclusionWindow properties: - clusterId: - type: string dayOfWeek: type: string durationInHours: @@ -49,7 +47,6 @@ spec: minimum: 
0 type: integer required: - - clusterId - dayOfWeek - durationInHours - startHour @@ -57,10 +54,12 @@ spec: status: description: ExclusionWindowStatus defines the observed state of ExclusionWindow properties: + clusterId: + type: string id: type: string - required: - - id + resourceState: + type: string type: object type: object served: true diff --git a/config/crd/bases/clusterresources.instaclustr.com_gcpvpcpeerings.yaml b/config/crd/bases/clusterresources.instaclustr.com_gcpvpcpeerings.yaml index 1fbad4150..787b8afb5 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_gcpvpcpeerings.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_gcpvpcpeerings.yaml @@ -35,8 +35,6 @@ spec: spec: description: GCPVPCPeeringSpec defines the desired state of GCPVPCPeering properties: - cdcId: - type: string peerProjectId: type: string peerSubnets: @@ -46,7 +44,6 @@ spec: peerVpcNetworkName: type: string required: - - cdcId - peerProjectId - peerSubnets - peerVpcNetworkName @@ -54,12 +51,16 @@ spec: status: description: GCPVPCPeeringStatus defines the observed state of GCPVPCPeering properties: + cdcid: + type: string failureReason: type: string id: type: string name: type: string + resourceState: + type: string statusCode: type: string type: object diff --git a/config/crd/bases/clusterresources.instaclustr.com_maintenanceevents.yaml b/config/crd/bases/clusterresources.instaclustr.com_maintenanceevents.yaml index e7a5ce71c..f1eac4d71 100644 --- a/config/crd/bases/clusterresources.instaclustr.com_maintenanceevents.yaml +++ b/config/crd/bases/clusterresources.instaclustr.com_maintenanceevents.yaml @@ -35,8 +35,6 @@ spec: spec: description: MaintenanceEventsSpec defines the desired state of MaintenanceEvents properties: - clusterId: - type: string maintenanceEventsReschedule: items: properties: @@ -50,7 +48,6 @@ spec: type: object type: array required: - - clusterId - maintenanceEventsReschedule type: object status: diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 1fd0ba456..ef1005f74 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -66,6 +66,107 @@ spec: - awsAccessKeySecretNamespace type: object type: array + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + 
properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -313,6 +414,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml index b92d77a37..7611a26cd 100644 --- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml @@ -50,6 +50,107 @@ spec: properties: bundledUseOnly: type: boolean + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -247,6 +348,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml index 87209b6b0..b06f12f54 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml @@ -48,6 +48,107 @@ spec: spec: description: KafkaConnectSpec defines the desired state of KafkaConnect properties: + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + 
type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object customConnectors: description: CustomConnectors defines the location for custom connector storage and access info. @@ -271,6 +372,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml index a6e3de764..b17a32d10 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml @@ -58,6 +58,107 @@ spec: type: boolean clientToClusterEncryption: type: boolean + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -264,6 +365,18 @@ spec: status: description: KafkaStatus defines the observed state of Kafka properties: + availableUsers: + items: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array cdcid: type: string currentClusterOperationStatus: @@ -275,6 +388,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml index 9852b8b2b..8eb061f79 100644 --- a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml +++ b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml @@ -68,6 +68,107 @@ spec: - nodeSize type: object type: array + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + 
awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -289,6 +390,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index 5ec93a122..453fe8586 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -52,6 +52,107 @@ spec: additionalProperties: type: string type: object + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -241,6 +342,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml index df4104371..4d8b770a8 100644 --- a/config/crd/bases/clusters.instaclustr.com_redis.yaml +++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml @@ -51,6 +51,107 @@ spec: clientEncryption: 
description: Enables client to node encryption type: boolean + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -241,6 +342,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer nodes: diff --git a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml index 412c538db..dfae1aac1 100644 --- a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml +++ b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml @@ -48,6 +48,107 @@ spec: spec: description: ZookeeperSpec defines the desired state of Zookeeper properties: + clusterResources: + properties: + awsSecurityGroupFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + awsVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + azureVNetPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterBackups: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + clusterNetworkFirewallRules: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + exclusionWindows: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + gcpVPCPeerings: + items: + properties: + dataCentreName: + type: string + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array + type: object dataCentres: items: properties: @@ -148,6 +249,8 @@ spec: type: string id: type: string + name: + type: string nodeNumber: type: integer 
nodes: diff --git a/config/samples/clusterresources_v1beta1_awssecuritygroupfirewallrule.yaml b/config/samples/clusterresources_v1beta1_awssecuritygroupfirewallrule.yaml index d90f56e8c..81645a5ad 100644 --- a/config/samples/clusterresources_v1beta1_awssecuritygroupfirewallrule.yaml +++ b/config/samples/clusterresources_v1beta1_awssecuritygroupfirewallrule.yaml @@ -9,6 +9,6 @@ metadata: app.kubernetes.io/created-by: operator name: awssecuritygroupfirewallrule-sample spec: - securityGroupId: sg-0d681e2d0fe0f0a39 - clusterId: ef924204-3139-43e9-8e03-c29278e6eccd + securityGroupId: sg-0ab978e9e4f443cc8 +# clusterId: ef924204-3139-43e9-8e03-c29278e6eccd type: POSTGRESQL diff --git a/config/samples/clusterresources_v1beta1_awsvpcpeering.yaml b/config/samples/clusterresources_v1beta1_awsvpcpeering.yaml index b0112bdd7..6dfe90004 100644 --- a/config/samples/clusterresources_v1beta1_awsvpcpeering.yaml +++ b/config/samples/clusterresources_v1beta1_awsvpcpeering.yaml @@ -15,4 +15,4 @@ spec: - "192.168.0.0/16" peerVpcId: "vpc-87241ae1" peerRegion: "US_EAST_1" - cdcId: "85b26d7e-f8ff-4ce6-9fd1-b0d25e6659a9" \ No newline at end of file +# cdcId: "249e86af-7afa-4674-8fab-10250661c5b4" \ No newline at end of file diff --git a/config/samples/clusterresources_v1beta1_azurevnetpeering.yaml b/config/samples/clusterresources_v1beta1_azurevnetpeering.yaml index 69330698e..5e06b3d25 100644 --- a/config/samples/clusterresources_v1beta1_azurevnetpeering.yaml +++ b/config/samples/clusterresources_v1beta1_azurevnetpeering.yaml @@ -1,11 +1,11 @@ apiVersion: clusterresources.instaclustr.com/v1beta1 kind: AzureVNetPeering metadata: - name: azurevnetpeering-sample + name: azurevnetpeering-sample-trough spec: - cdcId: f8581465-098c-4576-9e52-ea8308a27d8a +# cdcId: f8581465-098c-4576-9e52-ea8308a27d8a peerResourceGroup: rnd peerSubnets: - 10.224.0.0/16 peerSubscriptionId: 1a2f3ab8-6815-49c5-a47e-b1a354b51240 - peerVirtualNetworkName: aks-vnet-17973335 + peerVirtualNetworkName: aks-vnet-17973335222666 diff --git a/config/samples/clusterresources_v1beta1_clusterbackup.yaml b/config/samples/clusterresources_v1beta1_clusterbackup.yaml index 5c1afa170..9ee13b783 100644 --- a/config/samples/clusterresources_v1beta1_clusterbackup.yaml +++ b/config/samples/clusterresources_v1beta1_clusterbackup.yaml @@ -3,5 +3,5 @@ kind: ClusterBackup metadata: name: clusterbackup-sample spec: - clusterId: 2ae611cf-ac91-4325-941c-a35c043f9c34 +# clusterId: 2ae611cf-ac91-4325-941c-a35c043f9c34 clusterKind: PostgreSQL \ No newline at end of file diff --git a/config/samples/clusterresources_v1beta1_clusternetworkfirewallrule.yaml b/config/samples/clusterresources_v1beta1_clusternetworkfirewallrule.yaml index b475ddf1f..d5412190c 100644 --- a/config/samples/clusterresources_v1beta1_clusternetworkfirewallrule.yaml +++ b/config/samples/clusterresources_v1beta1_clusternetworkfirewallrule.yaml @@ -9,6 +9,6 @@ metadata: app.kubernetes.io/created-by: operator name: clusternetworkfirewallrule-sample spec: - network: 62.212.64.19/32 - clusterId: 944cfe6b-441f-4c5a-865b-42fd40c7d816 - type: KAFKA + network: 54.198.214.167/32 +# clusterId: c47c6a1c-9e2b-4a5c-aa5e-7013bceecd09 + type: POSTGRESQL diff --git a/config/samples/clusterresources_v1beta1_exclusionwindow.yaml b/config/samples/clusterresources_v1beta1_exclusionwindow.yaml index 56abb7e9a..022f6821f 100644 --- a/config/samples/clusterresources_v1beta1_exclusionwindow.yaml +++ b/config/samples/clusterresources_v1beta1_exclusionwindow.yaml @@ -3,7 +3,7 @@ kind: ExclusionWindow metadata: name: 
exclusionwindow-sample spec: - clusterId: "4b453851-9002-475a-a603-f8fb1e0ae7df" +# clusterId: "d72b0c01-d263-40c7-8d3d-adb837602647" dayOfWeek: "MONDAY" startHour: 10 durationInHours: 40 diff --git a/config/samples/clusterresources_v1beta1_gcpvpcpeering.yaml b/config/samples/clusterresources_v1beta1_gcpvpcpeering.yaml index 7b6cd3318..1b2974d30 100644 --- a/config/samples/clusterresources_v1beta1_gcpvpcpeering.yaml +++ b/config/samples/clusterresources_v1beta1_gcpvpcpeering.yaml @@ -6,6 +6,6 @@ spec: cdcId: ab974700-1ba9-4fcd-8399-3dc83fc2a3c3 peerProjectId: netapp-hcl-seclab peerSubnets: - - 192.168.0.0/16 - - 172.16.0.0/16 - peerVpcNetworkName: hcl-seclab-client-vpc1 + - 192.169.0.0/16 + - 172.17.0.0/16 + peerVpcNetworkName: hcl-seclab-client-vpc57x diff --git a/config/samples/clusterresources_v1beta1_maintenanceevents.yaml b/config/samples/clusterresources_v1beta1_maintenanceevents.yaml index 307734950..63fad8fbd 100644 --- a/config/samples/clusterresources_v1beta1_maintenanceevents.yaml +++ b/config/samples/clusterresources_v1beta1_maintenanceevents.yaml @@ -3,9 +3,6 @@ kind: MaintenanceEvents metadata: name: maintenanceevents-sample spec: - clusterId: "9cf09a53-a09e-450a-ba7d-e98b3c724911" maintenanceEventsReschedule: - - scheduledStartTime: "2023-11-09T04:30:00Z" - maintenanceEventId: "0d25b466-bc22-44a8-b15d-8f92e815cb6e" - - scheduledStartTime: "2023-11-15T06:00:00Z" - maintenanceEventId: "d4806381-cd1e-48df-b9ba-70f9b0829c72" + - scheduledStartTime: "2023-11-09T02:30:00Z" + maintenanceEventId: "d9199351-8438-4da9-8828-ab7a0dde640e" diff --git a/config/samples/clusters_v1beta1_cadence.yaml b/config/samples/clusters_v1beta1_cadence.yaml index 72b302978..8f1abd109 100644 --- a/config/samples/clusters_v1beta1_cadence.yaml +++ b/config/samples/clusters_v1beta1_cadence.yaml @@ -53,6 +53,29 @@ spec: clientEncryption: false # privateLink: # - advertisedHostname: "cadence-sample-test.com" + clusterResources: + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough slaTier: "NON_PRODUCTION" useCadenceWebAuth: false # targetPrimaryCadence: diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml index b0e1cb1a0..38a0eede1 100644 --- a/config/samples/clusters_v1beta1_cassandra.yaml +++ b/config/samples/clusters_v1beta1_cassandra.yaml @@ -44,6 +44,38 @@ spec: # name: cassandrauser-sample2 # - namespace: default # name: cassandrauser-sample3 + clusterResources: + clusterBackups: +# - namespace: default +# name: clusterbackup-sample +# - namespace: default +# name: clusterbackup-sample-two + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample +# dataCentreName: AWS_cassandra +# - namespace: default +# name: awsvpcpeering-sample-two +# dataCentreName: AWS_cassandra2 + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + 
exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: +# - namespace: default +# name: gcpvpcpeering-sample +# - namespace: default +# name: gcpvpcpeering-sample-two + azureVNetPeerings: +# - namespace: default +# name: azurevnetpeering-sample +# - namespace: default +# name: azurevnetpeering-sample-trough slaTier: "NON_PRODUCTION" # resizeSettings: # - notifySupportContacts: false diff --git a/config/samples/clusters_v1beta1_kafka.yaml b/config/samples/clusters_v1beta1_kafka.yaml index 6b6c632f4..15b717d23 100644 --- a/config/samples/clusters_v1beta1_kafka.yaml +++ b/config/samples/clusters_v1beta1_kafka.yaml @@ -61,6 +61,29 @@ spec: # userRefs: # - name: kafkauser-sample # namespace: default + clusterResources: + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough resizeSettings: - notifySupportContacts: false concurrency: 1 \ No newline at end of file diff --git a/config/samples/clusters_v1beta1_kafkaconnect.yaml b/config/samples/clusters_v1beta1_kafkaconnect.yaml index 0b6a840a0..6e6134e2b 100644 --- a/config/samples/clusters_v1beta1_kafkaconnect.yaml +++ b/config/samples/clusters_v1beta1_kafkaconnect.yaml @@ -15,8 +15,31 @@ spec: nodeSize: "KCN-DEV-t4g.medium-30" network: "10.15.0.0/16" region: "US_EAST_1" - name: "Username-KC" - version: "3.1.2" + clusterResources: + clusterNetworkFirewallRules: + - namespace: default + name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough + name: "oleksandr-KC" + version: "3.5.1" privateNetworkCluster: false slaTier: "NON_PRODUCTION" targetCluster: diff --git a/config/samples/clusters_v1beta1_opensearch.yaml b/config/samples/clusters_v1beta1_opensearch.yaml index 11268d699..911213d35 100644 --- a/config/samples/clusters_v1beta1_opensearch.yaml +++ b/config/samples/clusters_v1beta1_opensearch.yaml @@ -46,12 +46,38 @@ spec: # - nodeSize: SRH-DEV-t4g.small-5 # oidcProvider: '' # version: opensearch-dashboards:2.5.0 - version: 2.9.0 + version: 2.11.0 pciCompliance: false privateNetworkCluster: false reportingPlugin: false slaTier: NON_PRODUCTION sqlPlugin: false + clusterResources: + clusterBackups: +# - namespace: default +# name: clusterbackup-sample + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - 
namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough # resizeSettings: # - notifySupportContacts: false # concurrency: 3 \ No newline at end of file diff --git a/config/samples/clusters_v1beta1_postgresql.yaml b/config/samples/clusters_v1beta1_postgresql.yaml index 11c3b5859..9fd8d0f39 100644 --- a/config/samples/clusters_v1beta1_postgresql.yaml +++ b/config/samples/clusters_v1beta1_postgresql.yaml @@ -45,6 +45,36 @@ spec: # userRefs: # - namespace: default # name: postgresqluser-sample + clusterResources: + clusterBackups: +# - namespace: default +# name: clusterbackup-sample + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample +# dataCentreName: testDC1 +# - namespace: default +# name: awsvpcpeering-sample-two +# dataCentreName: testDC2 + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: +# - namespace: default +# name: gcpvpcpeering-sample +# - namespace: default +# name: gcpvpcpeering-sample-two + azureVNetPeerings: +# - namespace: default +# name: azurevnetpeering-sample +# - namespace: default +# name: azurevnetpeering-sample-trough privateNetworkCluster: false synchronousModeStrict: false # resizeSettings: diff --git a/config/samples/clusters_v1beta1_redis.yaml b/config/samples/clusters_v1beta1_redis.yaml index 9b1411529..95d85dded 100644 --- a/config/samples/clusters_v1beta1_redis.yaml +++ b/config/samples/clusters_v1beta1_redis.yaml @@ -24,6 +24,38 @@ spec: namespace: default # twoFactorDelete: # - email: "rostyslp@netapp.com" + clusterResources: + clusterBackups: +# - namespace: default +# name: clusterbackup-sample + # - namespace: default + # name: clusterbackup-sample-two + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: +# - namespace: default +# name: awsvpcpeering-sample +# dataCentreName: testDC1 +# - namespace: default +# name: awsvpcpeering-sample-two +# dataCentreName: testDC2 + awsSecurityGroupFirewallRules: +# - namespace: default +# name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough dataCentres: - region: "US_WEST_2" cloudProvider: "AWS_VPC" diff --git a/config/samples/clusters_v1beta1_zookeeper.yaml b/config/samples/clusters_v1beta1_zookeeper.yaml index 4fb8adbe2..d952f13f8 100644 --- a/config/samples/clusters_v1beta1_zookeeper.yaml +++ b/config/samples/clusters_v1beta1_zookeeper.yaml @@ -12,7 +12,30 @@ spec: nodeSize: "zookeeper-production-m5.large-60" nodesNumber: 3 region: "US_EAST_1" + clusterResources: + clusterNetworkFirewallRules: +# - namespace: default +# name: clusternetworkfirewallrule-sample + awsVPCPeerings: + - namespace: default + name: awsvpcpeering-sample + awsSecurityGroupFirewallRules: +# - namespace: default +# 
name: awssecuritygroupfirewallrule-sample + exclusionWindows: +# - namespace: default +# name: exclusionwindow-sample + gcpVPCPeerings: + # - namespace: default + # name: gcpvpcpeering-sample + # - namespace: default + # name: gcpvpcpeering-sample-two + azureVNetPeerings: + # - namespace: default + # name: azurevnetpeering-sample + # - namespace: default + # name: azurevnetpeering-sample-trough name: "Username-zookeeper" privateNetworkCluster: false slaTier: "NON_PRODUCTION" - version: "3.7.1" + version: "3.8.2" diff --git a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go index 742228611..0ea945269 100644 --- a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go +++ b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go @@ -75,14 +75,14 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) Reconcile(ctx context.Context, return ctrl.Result{}, err } - switch firewallRule.Annotations[models.ResourceStateAnnotation] { + switch firewallRule.Status.ResourceState { case models.CreatingEvent: return r.handleCreateFirewallRule(ctx, firewallRule, &l) case models.DeletingEvent: return r.handleDeleteFirewallRule(ctx, firewallRule, &l) case models.GenericEvent: l.Info("AWS security group firewall rule event isn't handled", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "request", req, "event", firewallRule.Annotations[models.ResourceStateAnnotation]) @@ -100,13 +100,13 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleCreateFirewallRule( if firewallRule.Status.ID == "" { l.Info( "Creating AWS security group firewall rule", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) patch := firewallRule.NewPatch() - firewallRuleStatus, err := r.API.CreateFirewallRule(instaclustr.AWSSecurityGroupFirewallRuleEndpoint, &firewallRule.Spec) + firewallRuleStatus, err := r.API.CreateAWSSecurityGroupFirewallRule(&firewallRule.Spec, firewallRule.Status.ClusterID) if err != nil { l.Error( err, "Cannot create AWS security group firewall rule", @@ -126,9 +126,10 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleCreateFirewallRule( ) firewallRule.Status.FirewallRuleStatus = *firewallRuleStatus + firewallRule.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, firewallRule, patch) if err != nil { - l.Error(err, "Cannot patch AWS security group firewall rule status ", "ID", firewallRule.Status.ID) + l.Error(err, "Cannot patch AWS security group firewall rule status", "ID", firewallRule.Status.ID) r.EventRecorder.Eventf( firewallRule, models.Warning, models.PatchFailed, "Resource status patch is failed. 
Reason: %v", @@ -142,7 +143,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleCreateFirewallRule( err = r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch AWS security group firewall rule", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) r.EventRecorder.Eventf( @@ -155,7 +156,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleCreateFirewallRule( l.Info( "AWS security group firewall rule resource has been created", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) } @@ -189,7 +190,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( err := r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch AWS security group firewall rule metadata", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) @@ -205,7 +206,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error( err, "Cannot get AWS security group firewall rule status from the Instaclustr API", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) @@ -219,10 +220,10 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( if status != nil && status.Status != statusDELETED { err = r.API.DeleteFirewallRule(firewallRule.Status.ID, instaclustr.AWSSecurityGroupFirewallRuleEndpoint) - if err != nil { + if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "Cannot delete AWS security group firewall rule", "rule ID", firewallRule.Status.ID, - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) @@ -239,13 +240,24 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( ) } + firewallRule.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, firewallRule, patch) + if err != nil { + l.Error(err, "Cannot patch AWS security group firewall rule status", "ID", firewallRule.Status.ID) + r.EventRecorder.Eventf( + firewallRule, models.Warning, models.PatchFailed, + "Resource status patch is failed. Reason: %v", + err, + ) + return ctrl.Result{}, err + } + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) - firewallRule.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch AWS security group firewall rule metadata", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "status", firewallRule.Status, ) @@ -259,7 +271,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( } l.Info("AWS security group firewall rule has been deleted", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "status", firewallRule.Status, ) @@ -355,36 +367,21 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) SetupWithManager(mgr ctrl.Manag RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). 
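For context on the predicate that follows: reconciliation of a firewall rule is now kicked off by a status patch that sets ResourceState, which is exactly what the controller test further down in this diff does. A minimal sketch of that trigger, assuming a controller-runtime client and the import paths used elsewhere in this diff (the models import path and the function name are assumptions):

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/instaclustr/operator/apis/clusterresources/v1beta1"
	"github.com/instaclustr/operator/pkg/models" // assumed path for the models package
)

// triggerFirewallRuleCreation is illustrative only: it binds the rule to a cluster via
// status and flags it as creating, so the UpdateFunc below admits the event.
func triggerFirewallRuleCreation(ctx context.Context, c client.Client, fr *v1beta1.AWSSecurityGroupFirewallRule, clusterID string) error {
	patch := fr.NewPatch()
	fr.Status.ClusterID = clusterID
	fr.Status.ResourceState = models.CreatingEvent
	// Patch the status subresource only; spec and metadata stay untouched.
	return c.Status().Patch(ctx, fr, patch)
}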
For(&v1beta1.AWSSecurityGroupFirewallRule{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - if event.Object.GetDeletionTimestamp() != nil { - event.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.DeletingEvent - return true - } - - event.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.CreatingEvent return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.AWSSecurityGroupFirewallRule) - if newObj.Generation == event.ObjectOld.GetGeneration() { - return false - } + oldObj := event.ObjectOld.(*v1beta1.AWSSecurityGroupFirewallRule) - if newObj.DeletionTimestamp != nil { - event.ObjectNew.GetAnnotations()[models.ResourceStateAnnotation] = models.DeletingEvent + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } - newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent - return true - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - genericEvent.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.GenericEvent - return true + return false }, })). Complete(r) diff --git a/controllers/clusterresources/awssecuritygroupfirewallrule_controller_test.go b/controllers/clusterresources/awssecuritygroupfirewallrule_controller_test.go index 522ddc8f1..789983956 100644 --- a/controllers/clusterresources/awssecuritygroupfirewallrule_controller_test.go +++ b/controllers/clusterresources/awssecuritygroupfirewallrule_controller_test.go @@ -33,8 +33,7 @@ var _ = Describe("Successful creation of a AWS Security Group Firewall Rule reso Context("When setting up a AWS Security Group Firewall Rule CRD", func() { awsSGFirewallRuleSpec := v1beta1.AWSSecurityGroupFirewallRuleSpec{ FirewallRuleSpec: v1beta1.FirewallRuleSpec{ - ClusterID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", - Type: "SECURITY", + Type: "SECURITY", }, SecurityGroupID: "sg-1434412", } @@ -49,11 +48,21 @@ var _ = Describe("Successful creation of a AWS Security Group Firewall Rule reso }, }, Spec: awsSGFirewallRuleSpec, + Status: v1beta1.AWSSecurityGroupFirewallRuleStatus{ + FirewallRuleStatus: v1beta1.FirewallRuleStatus{ + ClusterID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", + }, + }, } It("Should create a AWS Security Group Firewall Rule resources", func() { Expect(k8sClient.Create(ctx, &resource)).Should(Succeed()) + patch := resource.NewPatch() + resource.Status.ClusterID = "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2" + resource.Status.ResourceState = models.CreatingEvent + Expect(k8sClient.Status().Patch(ctx, &resource, patch)).Should(Succeed()) + By("Sending AWS Security Group Firewall Rule Specification to Instaclustr API v2") var awsSGFirewallRule v1beta1.AWSSecurityGroupFirewallRule Eventually(func() bool { diff --git a/controllers/clusterresources/awsvpcpeering_controller.go b/controllers/clusterresources/awsvpcpeering_controller.go index 246b23c84..46f8b48b9 100644 --- a/controllers/clusterresources/awsvpcpeering_controller.go +++ b/controllers/clusterresources/awsvpcpeering_controller.go @@ -73,7 +73,7 @@ func (r *AWSVPCPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - switch aws.Annotations[models.ResourceStateAnnotation] { + switch aws.Status.ResourceState { case models.CreatingEvent: return 
r.handleCreatePeering(ctx, aws, l) case models.UpdatingEvent: @@ -104,7 +104,7 @@ func (r *AWSVPCPeeringReconciler) handleCreatePeering( "Region", aws.Spec.PeerRegion, ) - awsStatus, err := r.API.CreatePeering(instaclustr.AWSPeeringEndpoint, &aws.Spec) + awsStatus, err := r.API.CreateAWSVPCPeering(&aws.Spec, aws.Status.CDCID) if err != nil { l.Error( err, "cannot create AWS VPC Peering resource", @@ -126,9 +126,10 @@ func (r *AWSVPCPeeringReconciler) handleCreatePeering( patch := aws.NewPatch() aws.Status.PeeringStatus = *awsStatus + aws.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, aws, patch) if err != nil { - l.Error(err, "cannot patch AWS VPC Peering resource status", + l.Error(err, "Cannot patch AWS VPC Peering resource status", "AWS Peering ID", awsStatus.ID, "AWS Account ID", aws.Spec.PeerAWSAccountID, "VPC ID", aws.Spec.PeerVPCID, @@ -144,10 +145,9 @@ func (r *AWSVPCPeeringReconciler) handleCreatePeering( } controllerutil.AddFinalizer(aws, models.DeletionFinalizer) - aws.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, aws, patch) if err != nil { - l.Error(err, "cannot patch AWS VPC Peering resource metadata", + l.Error(err, "Cannot patch AWS VPC Peering resource metadata", "AWS Peering ID", awsStatus.ID, "AWS Account ID", aws.Spec.PeerAWSAccountID, "VPC ID", aws.Spec.PeerVPCID, @@ -267,7 +267,7 @@ func (r *AWSVPCPeeringReconciler) handleUpdatePeering( aws.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent err = r.Patch(ctx, aws, patch) if err != nil { - l.Error(err, "cannot patch AWS VPC Peering resource metadata", + l.Error(err, "Cannot patch AWS VPC Peering resource metadata", "AWS Peering ID", aws.Status.ID, "AWS Account ID", aws.Spec.PeerAWSAccountID, "VPC ID", aws.Spec.PeerVPCID, @@ -287,7 +287,7 @@ func (r *AWSVPCPeeringReconciler) handleUpdatePeering( "AWS Account ID", aws.Spec.PeerAWSAccountID, "VPC ID", aws.Spec.PeerVPCID, "Region", aws.Spec.PeerRegion, - "AWS VPC Peering Data Centre ID", aws.Spec.DataCentreID, + "AWS VPC Peering Data Centre ID", aws.Status.CDCID, "AWS VPC Peering Status", aws.Status.PeeringStatus, ) @@ -343,11 +343,29 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( r.Scheduler.RemoveJob(aws.GetJobID(scheduler.StatusChecker)) patch := aws.NewPatch() + + aws.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, aws, patch) + if err != nil { + l.Error(err, "Cannot patch AWS VPC Peering resource status", + "AWS Peering ID", aws.Status.ID, + "AWS Account ID", aws.Spec.PeerAWSAccountID, + "VPC ID", aws.Spec.PeerVPCID, + "Region", aws.Spec.PeerRegion, + "AWS VPC Peering metadata", aws.ObjectMeta, + ) + r.EventRecorder.Eventf( + aws, models.Warning, models.PatchFailed, + "Resource status patch is failed. 
Reason: %v", + err, + ) + return ctrl.Result{}, err + } + controllerutil.RemoveFinalizer(aws, models.DeletionFinalizer) - aws.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, aws, patch) if err != nil { - l.Error(err, "cannot patch AWS VPC Peering resource metadata", + l.Error(err, "Cannot patch AWS VPC Peering resource metadata", "AWS Peering ID", aws.Status.ID, "AWS Account ID", aws.Spec.PeerAWSAccountID, "VPC ID", aws.Spec.PeerVPCID, @@ -366,7 +384,7 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( "AWS VPC Peering ID", aws.Status.ID, "VPC ID", aws.Spec.PeerVPCID, "Region", aws.Spec.PeerRegion, - "AWS VPC Peering Data Centre ID", aws.Spec.DataCentreID, + "AWS VPC Peering Data Centre ID", aws.Status.CDCID, "AWS VPC Peering Status", aws.Status.PeeringStatus, ) @@ -476,21 +494,17 @@ func (r *AWSVPCPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). For(&v1beta1.AWSVPCPeering{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}) - if event.Object.GetDeletionTimestamp() != nil { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.DeletingEvent}) - } return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.AWSVPCPeering) - if newObj.DeletionTimestamp != nil { - newObj.Annotations[models.ResourceStateAnnotation] = models.DeletingEvent + oldObj := event.ObjectOld.(*v1beta1.AWSVPCPeering) + + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } @@ -498,16 +512,12 @@ func (r *AWSVPCPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } - newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent + newObj.Status.ResourceState = models.UpdatingEvent return true }, DeleteFunc: func(event event.DeleteEvent) bool { return false }, - GenericFunc: func(event event.GenericEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.GenericEvent}) - return true - }, })).Complete(r) } diff --git a/controllers/clusterresources/awsvpcpeering_controller_test.go b/controllers/clusterresources/awsvpcpeering_controller_test.go index a014d8871..2170302ac 100644 --- a/controllers/clusterresources/awsvpcpeering_controller_test.go +++ b/controllers/clusterresources/awsvpcpeering_controller_test.go @@ -33,8 +33,7 @@ var _ = Describe("Successful creation of a AWS VPC Peering resource", func() { Context("When setting up a AWS VPC Peering CRD", func() { awsVPCPeeringSpec := v1beta1.AWSVPCPeeringSpec{ VPCPeeringSpec: v1beta1.VPCPeeringSpec{ - DataCentreID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", - PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, + PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, }, PeerAWSAccountID: "152668027680", PeerVPCID: "vpc-87241ae1", @@ -56,6 +55,11 @@ var _ = Describe("Successful creation of a AWS VPC Peering resource", func() { It("Should create a AWS VPC Peering resources", func() { Expect(k8sClient.Create(ctx, &resource)).Should(Succeed()) + patch := 
resource.NewPatch() + resource.Status.CDCID = "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2" + resource.Status.ResourceState = models.CreatingEvent + Expect(k8sClient.Status().Patch(ctx, &resource, patch)).Should(Succeed()) + By("Sending AWS VPC Peering Specification to Instaclustr API v2") var awsVPCPeering v1beta1.AWSVPCPeering Eventually(func() bool { diff --git a/controllers/clusterresources/azurevnetpeering_controller.go b/controllers/clusterresources/azurevnetpeering_controller.go index f6f2f9e78..b73c27152 100644 --- a/controllers/clusterresources/azurevnetpeering_controller.go +++ b/controllers/clusterresources/azurevnetpeering_controller.go @@ -73,9 +73,9 @@ func (r *AzureVNetPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, err } - switch azure.Annotations[models.ResourceStateAnnotation] { + switch azure.Status.ResourceState { case models.CreatingEvent: - return r.handleCreatePeering(ctx, azure, l) + return r.handleCreatePeering(ctx, azure, &l) case models.UpdatingEvent: return r.handleUpdatePeering(ctx, azure, &l) case models.DeletingEvent: @@ -95,7 +95,7 @@ func (r *AzureVNetPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Req func (r *AzureVNetPeeringReconciler) handleCreatePeering( ctx context.Context, azure *v1beta1.AzureVNetPeering, - l logr.Logger, + l *logr.Logger, ) (ctrl.Result, error) { if azure.Status.ID == "" { l.Info( @@ -106,7 +106,7 @@ func (r *AzureVNetPeeringReconciler) handleCreatePeering( "Vnet Name", azure.Spec.PeerVirtualNetworkName, ) - azureStatus, err := r.API.CreatePeering(instaclustr.AzurePeeringEndpoint, &azure.Spec) + azureStatus, err := r.API.CreateAzureVNetPeering(&azure.Spec, azure.Status.CDCID) if err != nil { l.Error( err, "cannot create Azure VNet Peering resource", @@ -128,9 +128,10 @@ func (r *AzureVNetPeeringReconciler) handleCreatePeering( patch := azure.NewPatch() azure.Status.PeeringStatus = *azureStatus + azure.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, azure, patch) if err != nil { - l.Error(err, "cannot patch Azure VNet Peering resource status", + l.Error(err, "Cannot patch Azure VNet Peering resource status", "Azure Subscription ID", azure.Spec.PeerSubscriptionID, "AD Object ID", azure.Spec.PeerADObjectID, "Resource Group", azure.Spec.PeerResourceGroup, @@ -145,10 +146,9 @@ func (r *AzureVNetPeeringReconciler) handleCreatePeering( } controllerutil.AddFinalizer(azure, models.DeletionFinalizer) - azure.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, azure, patch) if err != nil { - l.Error(err, "cannot patch Azure VNet Peering resource metadata", + l.Error(err, "Cannot patch Azure VNet Peering resource metadata", "Azure Subscription ID", azure.Spec.PeerSubscriptionID, "AD Object ID", azure.Spec.PeerADObjectID, "Resource Group", azure.Spec.PeerResourceGroup, @@ -251,11 +251,28 @@ func (r *AzureVNetPeeringReconciler) handleDeletePeering( } patch := azure.NewPatch() + + azure.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, azure, patch) + if err != nil { + l.Error(err, "Cannot patch Azure VNet Peering resource status", + "Azure Subscription ID", azure.Spec.PeerSubscriptionID, + "AD Object ID", azure.Spec.PeerADObjectID, + "Resource Group", azure.Spec.PeerResourceGroup, + "Vnet Name", azure.Spec.PeerVirtualNetworkName, + ) + r.EventRecorder.Eventf( + azure, models.Warning, models.PatchFailed, + "Resource status patch is failed. 
Reason: %v", + err, + ) + return ctrl.Result{}, err + } + controllerutil.RemoveFinalizer(azure, models.DeletionFinalizer) - azure.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, azure, patch) if err != nil { - l.Error(err, "cannot patch Azure VNet Peering resource metadata", + l.Error(err, "Cannot patch Azure VNet Peering resource metadata", "Azure Subscription ID", azure.Spec.PeerSubscriptionID, "AD Object ID", azure.Spec.PeerADObjectID, "Resource Group", azure.Spec.PeerResourceGroup, @@ -373,21 +390,17 @@ func (r *AzureVNetPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). For(&v1beta1.AzureVNetPeering{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}) - if event.Object.GetDeletionTimestamp() != nil { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.DeletingEvent}) - } return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.AzureVNetPeering) - if newObj.DeletionTimestamp != nil { - newObj.Annotations[models.ResourceStateAnnotation] = models.DeletingEvent + oldObj := event.ObjectOld.(*v1beta1.AzureVNetPeering) + + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } @@ -395,7 +408,7 @@ func (r *AzureVNetPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } - newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent + newObj.Status.ResourceState = models.UpdatingEvent return true }, DeleteFunc: func(event event.DeleteEvent) bool { diff --git a/controllers/clusterresources/azurevnetpeering_controller_test.go b/controllers/clusterresources/azurevnetpeering_controller_test.go index 22482be52..9e8bec760 100644 --- a/controllers/clusterresources/azurevnetpeering_controller_test.go +++ b/controllers/clusterresources/azurevnetpeering_controller_test.go @@ -33,8 +33,7 @@ var _ = Describe("Successful creation of a Azure VNet Peering resource", func() Context("When setting up a Azure VNet Peering CRD", func() { azureVNetPeeringSpec := v1beta1.AzureVNetPeeringSpec{ VPCPeeringSpec: v1beta1.VPCPeeringSpec{ - DataCentreID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", - PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, + PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, }, PeerResourceGroup: "rg-1231212", PeerSubscriptionID: "sg-123321", @@ -57,6 +56,11 @@ var _ = Describe("Successful creation of a Azure VNet Peering resource", func() It("Should create a Azure VNet Peering resources", func() { Expect(k8sClient.Create(ctx, &resource)).Should(Succeed()) + patch := resource.NewPatch() + resource.Status.CDCID = "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2" + resource.Status.ResourceState = models.CreatingEvent + Expect(k8sClient.Status().Patch(ctx, &resource, patch)).Should(Succeed()) + By("Sending Azure VNet Peering Specification to Instaclustr API v2") var azureVNetPeering v1beta1.AzureVNetPeering Eventually(func() bool { diff --git a/controllers/clusterresources/clusterbackup_controller.go 
b/controllers/clusterresources/clusterbackup_controller.go index aa25574f9..feb2b52b1 100644 --- a/controllers/clusterresources/clusterbackup_controller.go +++ b/controllers/clusterresources/clusterbackup_controller.go @@ -79,11 +79,15 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques patch := backup.NewPatch() - if backup.Labels[models.ClusterIDLabel] != backup.Spec.ClusterID { + if backup.Status.ClusterID == "" { + return models.ExitReconcile, nil + } + + if backup.Labels[models.ClusterIDLabel] != backup.Status.ClusterID { if backup.Labels == nil { - backup.Labels = map[string]string{models.ClusterIDLabel: backup.Spec.ClusterID} + backup.Labels = map[string]string{models.ClusterIDLabel: backup.Status.ClusterID} } else { - backup.Labels[models.ClusterIDLabel] = backup.Spec.ClusterID + backup.Labels[models.ClusterIDLabel] = backup.Status.ClusterID } err = r.Patch(ctx, backup, patch) if err != nil { @@ -100,11 +104,11 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques } } - backupsList, err := r.listClusterBackups(ctx, backup.Spec.ClusterID, backup.Namespace) + backupsList, err := r.listClusterBackups(ctx, backup.Status.ClusterID, backup.Namespace) if err != nil { logger.Error(err, "Cannot get cluster backups", "backup name", backup.Name, - "cluster ID", backup.Spec.ClusterID, + "cluster ID", backup.Status.ClusterID, ) r.EventRecorder.Eventf( @@ -120,11 +124,11 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques clusterKind = models.PgAppKind } - iBackup, err := r.API.GetClusterBackups(backup.Spec.ClusterID, clusterKind) + iBackup, err := r.API.GetClusterBackups(backup.Status.ClusterID, clusterKind) if err != nil { logger.Error(err, "Cannot get cluster backups from Instaclustr", "backup name", backup.Name, - "cluster ID", backup.Spec.ClusterID, + "cluster ID", backup.Status.ClusterID, ) r.EventRecorder.Eventf( @@ -138,11 +142,11 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques iBackupEvents := iBackup.GetBackupEvents(backup.Spec.ClusterKind) if len(iBackupEvents) < len(backupsList.Items) { - err = r.API.TriggerClusterBackup(backup.Spec.ClusterID, models.ClusterKindsMap[backup.Spec.ClusterKind]) + err = r.API.TriggerClusterBackup(backup.Status.ClusterID, models.ClusterKindsMap[backup.Spec.ClusterKind]) if err != nil { logger.Error(err, "Cannot trigger cluster backup", "backup name", backup.Name, - "cluster ID", backup.Spec.ClusterID, + "cluster ID", backup.Status.ClusterID, ) r.EventRecorder.Eventf( @@ -158,7 +162,7 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques "Resource creation request is sent", ) logger.Info("New cluster backup request was sent", - "cluster ID", backup.Spec.ClusterID, + "cluster ID", backup.Status.ClusterID, ) } @@ -216,7 +220,7 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques logger.Info("Cluster backup resource was reconciled", "backup name", backup.Name, - "cluster ID", backup.Spec.ClusterID, + "cluster ID", backup.Status.ClusterID, ) return ctrl.Result{}, nil @@ -243,7 +247,8 @@ func (r *ClusterBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). 
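A related sketch for the ClusterBackup predicate that follows: a backup only becomes reconcilable once its status carries the cluster ID (Reconcile returns early while it is empty), so the owning cluster controller, or a test, has to patch it in. Same assumed imports as the earlier sketch; the function name is illustrative:

// attachBackup is a sketch of the status patch that makes a ClusterBackup reconcilable.
func attachBackup(ctx context.Context, c client.Client, backup *v1beta1.ClusterBackup, clusterID string) error {
	patch := backup.NewPatch()
	// The UpdateFunc below only admits updates once ClusterID is non-empty.
	backup.Status.ClusterID = clusterID
	return c.Status().Patch(ctx, backup, patch)
}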
For(&v1beta1.ClusterBackup{}, builder.WithPredicates(predicate.Funcs{ UpdateFunc: func(event event.UpdateEvent) bool { - return false + newObj := event.ObjectNew.(*v1beta1.ClusterBackup) + return newObj.Status.ClusterID != "" }, })). Complete(r) diff --git a/controllers/clusterresources/clusternetworkfirewallrule_controller.go b/controllers/clusterresources/clusternetworkfirewallrule_controller.go index fa145717c..2fa7e4fba 100644 --- a/controllers/clusterresources/clusternetworkfirewallrule_controller.go +++ b/controllers/clusterresources/clusternetworkfirewallrule_controller.go @@ -79,7 +79,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) Reconcile(ctx context.Context, re return ctrl.Result{}, err } - switch firewallRule.Annotations[models.ResourceStateAnnotation] { + switch firewallRule.Status.ResourceState { case models.CreatingEvent: return r.HandleCreateFirewallRule(ctx, firewallRule, &l) case models.UpdatingEvent: @@ -88,7 +88,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) Reconcile(ctx context.Context, re return r.HandleDeleteFirewallRule(ctx, firewallRule, &l) case models.GenericEvent: l.Info("Cluster network firewall rule event isn't handled", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "request", req, "event", firewallRule.Annotations[models.ResourceStateAnnotation]) @@ -106,13 +106,13 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleCreateFirewallRule( if firewallRule.Status.ID == "" { l.Info( "Creating cluster network firewall rule", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) patch := firewallRule.NewPatch() - firewallRuleStatus, err := r.API.CreateFirewallRule(instaclustr.ClusterNetworkFirewallRuleEndpoint, &firewallRule.Spec) + firewallRuleStatus, err := r.API.CreateClusterNetworkFirewallRule(&firewallRule.Spec, firewallRule.Status.ClusterID) if err != nil { l.Error( err, "Cannot create cluster network firewall rule", @@ -132,7 +132,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleCreateFirewallRule( ) firewallRule.Status.FirewallRuleStatus = *firewallRuleStatus - + firewallRule.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch cluster network firewall rule status ", "ID", firewallRule.Status.ID) @@ -144,13 +144,12 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleCreateFirewallRule( return ctrl.Result{}, err } - firewallRule.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent controllerutil.AddFinalizer(firewallRule, models.DeletionFinalizer) err = r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch cluster network firewall rule", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) r.EventRecorder.Eventf( @@ -163,7 +162,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleCreateFirewallRule( l.Info( "Cluster network firewall rule resource has been created", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) } @@ -194,7 +193,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleUpdateFirewallRule( l *logr.Logger, ) (ctrl.Result, error) { l.Info("Cluster network firewall rule update is not implemented", - "firewall rule ID", firewallRule.Spec.ClusterID, + "firewall rule ID", firewallRule.Status.ClusterID, 
"type", firewallRule.Spec.Type, ) @@ -210,7 +209,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( err := r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch cluster network firewall rule metadata", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) r.EventRecorder.Eventf( @@ -225,7 +224,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error( err, "Cannot get cluster network firewall rule status from the Instaclustr API", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) r.EventRecorder.Eventf( @@ -238,10 +237,10 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( if status != nil && status.Status != statusDELETED { err = r.API.DeleteFirewallRule(firewallRule.Status.ID, instaclustr.ClusterNetworkFirewallRuleEndpoint) - if err != nil { + if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "Cannot delete cluster network firewall rule", "rule ID", firewallRule.Status.ID, - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, ) r.EventRecorder.Eventf( @@ -258,13 +257,24 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( ) } + firewallRule.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, firewallRule, patch) + if err != nil { + l.Error(err, "Cannot patch cluster network firewall rule status", "ID", firewallRule.Status.ID) + r.EventRecorder.Eventf( + firewallRule, models.Warning, models.PatchFailed, + "Resource status patch is failed. Reason: %v", + err, + ) + return ctrl.Result{}, err + } + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) - firewallRule.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) if err != nil { l.Error(err, "Cannot patch cluster network firewall rule metadata", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "status", firewallRule.Status, ) @@ -277,7 +287,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( } l.Info("Cluster network firewall rule has been deleted", - "cluster ID", firewallRule.Spec.ClusterID, + "cluster ID", firewallRule.Status.ClusterID, "type", firewallRule.Spec.Type, "status", firewallRule.Status, ) @@ -332,23 +342,17 @@ func (r *ClusterNetworkFirewallRuleReconciler) SetupWithManager(mgr ctrl.Manager RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). 
For(&v1beta1.ClusterNetworkFirewallRule{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - if event.Object.GetDeletionTimestamp() != nil { - event.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.DeletingEvent - return true - } - - event.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.CreatingEvent return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.ClusterNetworkFirewallRule) - if newObj.DeletionTimestamp != nil { - newObj.Annotations[models.ResourceStateAnnotation] = models.DeletingEvent + oldObj := event.ObjectOld.(*v1beta1.ClusterNetworkFirewallRule) + + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } @@ -356,11 +360,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) SetupWithManager(mgr ctrl.Manager return false } - newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent - return true - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - genericEvent.Object.GetAnnotations()[models.ResourceStateAnnotation] = models.GenericEvent + newObj.Status.ResourceState = models.UpdatingEvent return true }, })). diff --git a/controllers/clusterresources/clusternetworkfirewallrule_controller_test.go b/controllers/clusterresources/clusternetworkfirewallrule_controller_test.go index 774b3dbd8..78d94f6ad 100644 --- a/controllers/clusterresources/clusternetworkfirewallrule_controller_test.go +++ b/controllers/clusterresources/clusternetworkfirewallrule_controller_test.go @@ -33,8 +33,7 @@ var _ = Describe("Successful creation of a Cluster Network Firewall Rule resourc Context("When setting up a Cluster Network Firewall Rule CRD", func() { clusterNetworkFirewallRuleSpec := v1beta1.ClusterNetworkFirewallRuleSpec{ FirewallRuleSpec: v1beta1.FirewallRuleSpec{ - ClusterID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", - Type: "SECURITY", + Type: "SECURITY", }, Network: "191.54.123.1/24", } @@ -54,6 +53,11 @@ var _ = Describe("Successful creation of a Cluster Network Firewall Rule resourc It("Should create a Cluster Network Firewall Rule resources", func() { Expect(k8sClient.Create(ctx, &resource)).Should(Succeed()) + patch := resource.NewPatch() + resource.Status.ClusterID = "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2" + resource.Status.ResourceState = models.CreatingEvent + Expect(k8sClient.Status().Patch(ctx, &resource, patch)).Should(Succeed()) + By("Sending Cluster Network Firewall Rule Specification to Instaclustr API v2") var clusterNetworkFirewallRule v1beta1.ClusterNetworkFirewallRule Eventually(func() bool { diff --git a/controllers/clusterresources/exclusionwindow_controller.go b/controllers/clusterresources/exclusionwindow_controller.go index c46b07f7e..1901d879d 100644 --- a/controllers/clusterresources/exclusionwindow_controller.go +++ b/controllers/clusterresources/exclusionwindow_controller.go @@ -75,14 +75,14 @@ func (r *ExclusionWindowReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - switch ew.Annotations[models.ResourceStateAnnotation] { + switch ew.Status.ResourceState { case models.CreatingEvent: return r.handleCreateWindow(ctx, ew, l) case models.DeletingEvent: return r.handleDeleteWindow(ctx, ew, l) default: l.Info("event isn't handled", - "Cluster ID", ew.Spec.ClusterID, + 
"Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Request", req, "event", ew.Annotations[models.ResourceStateAnnotation]) @@ -98,11 +98,11 @@ func (r *ExclusionWindowReconciler) handleCreateWindow( if ew.Status.ID == "" { l.Info( "Creating Exclusion Window resource", - "Cluster ID", ew.Spec.ClusterID, + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, ) - id, err := r.API.CreateExclusionWindow(ew.Spec.ClusterID, &ew.Spec) + id, err := r.API.CreateExclusionWindow(ew.Status.ClusterID, &ew.Spec) if err != nil { l.Error( err, "cannot create Exclusion Window resource", @@ -123,10 +123,11 @@ func (r *ExclusionWindowReconciler) handleCreateWindow( patch := ew.NewPatch() ew.Status.ID = id + ew.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, ew, patch) if err != nil { - l.Error(err, "cannot patch Exclusion Window resource status after creation", - "Cluster ID", ew.Spec.ClusterID, + l.Error(err, "Cannot patch Exclusion Window resource status after creation", + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Exclusion Window metadata", ew.ObjectMeta, ) @@ -139,11 +140,10 @@ func (r *ExclusionWindowReconciler) handleCreateWindow( } controllerutil.AddFinalizer(ew, models.DeletionFinalizer) - ew.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, ew, patch) if err != nil { - l.Error(err, "cannot patch Exclusion Window resource metadata with created event", - "Cluster ID", ew.Spec.ClusterID, + l.Error(err, "Cannot patch Exclusion Window resource metadata with created event", + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Exclusion Window metadata", ew.ObjectMeta, ) @@ -157,7 +157,7 @@ func (r *ExclusionWindowReconciler) handleCreateWindow( l.Info( "Exclusion Window resource was created", - "Cluster ID", ew.Spec.ClusterID, + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, ) } @@ -174,7 +174,7 @@ func (r *ExclusionWindowReconciler) handleDeleteWindow( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error( err, "cannot get Exclusion Window status from the Instaclustr API", - "Cluster ID", ew.Spec.ClusterID, + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, ) r.EventRecorder.Eventf( @@ -187,9 +187,9 @@ func (r *ExclusionWindowReconciler) handleDeleteWindow( if status != "" { err = r.API.DeleteExclusionWindow(ew.Status.ID) - if err != nil { + if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "cannot delete Exclusion Window resource", - "Cluster ID", ew.Spec.ClusterID, + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Exclusion Window metadata", ew.ObjectMeta, ) @@ -207,12 +207,26 @@ func (r *ExclusionWindowReconciler) handleDeleteWindow( } patch := ew.NewPatch() + ew.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, ew, patch) + if err != nil { + l.Error(err, "Cannot patch Exclusion Window resource status", + "Cluster ID", ew.Status.ClusterID, + "Exclusion Window Spec", ew.Spec, + "Exclusion Window metadata", ew.ObjectMeta, + ) + r.EventRecorder.Eventf( + ew, models.Warning, models.PatchFailed, + "Status patch is failed. 
Reason: %v", + err, + ) + return ctrl.Result{}, err + } controllerutil.RemoveFinalizer(ew, models.DeletionFinalizer) - ew.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, ew, patch) if err != nil { - l.Error(err, "cannot patch Exclusion Window resource metadata with deleted event", - "Cluster ID", ew.Spec.ClusterID, + l.Error(err, "Cannot patch Exclusion Window resource metadata with deleted event", + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Exclusion Window metadata", ew.ObjectMeta, ) @@ -225,7 +239,7 @@ func (r *ExclusionWindowReconciler) handleDeleteWindow( } l.Info("Exclusion Window has been deleted", - "Cluster ID", ew.Spec.ClusterID, + "Cluster ID", ew.Status.ClusterID, "Exclusion Window Spec", ew.Spec, "Exclusion Window Status", ew.Status, ) @@ -245,21 +259,17 @@ func (r *ExclusionWindowReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). For(&v1beta1.ExclusionWindow{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}) - if event.Object.GetDeletionTimestamp() != nil { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.DeletingEvent}) - } return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.ExclusionWindow) - if newObj.DeletionTimestamp != nil { - newObj.Annotations[models.ResourceStateAnnotation] = models.DeletingEvent + oldObj := event.ObjectOld.(*v1beta1.ExclusionWindow) + + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } @@ -273,9 +283,5 @@ func (r *ExclusionWindowReconciler) SetupWithManager(mgr ctrl.Manager) error { DeleteFunc: func(event event.DeleteEvent) bool { return false }, - GenericFunc: func(event event.GenericEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.GenericEvent}) - return true - }, })).Complete(r) } diff --git a/controllers/clusterresources/gcpvpcpeering_controller.go b/controllers/clusterresources/gcpvpcpeering_controller.go index e8c16c5cc..fb3d470b6 100644 --- a/controllers/clusterresources/gcpvpcpeering_controller.go +++ b/controllers/clusterresources/gcpvpcpeering_controller.go @@ -73,13 +73,13 @@ func (r *GCPVPCPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - switch gcp.Annotations[models.ResourceStateAnnotation] { + switch gcp.Status.ResourceState { case models.CreatingEvent: - return r.handleCreateCluster(ctx, gcp, l) - case models.UpdatingEvent: - return r.handleUpdateCluster(ctx, gcp, l) + return r.handleCreatePeering(ctx, gcp, l) case models.DeletingEvent: - return r.handleDeleteCluster(ctx, gcp, l) + return r.handleDeletePeering(ctx, gcp, l) + case models.UpdatingEvent: + return r.handleUpdatePeering(ctx, gcp, l) default: l.Info("Event isn't handled", "project ID", gcp.Spec.PeerProjectID, @@ -90,7 +90,7 @@ func (r *GCPVPCPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Reques } } -func (r *GCPVPCPeeringReconciler) handleCreateCluster( +func (r *GCPVPCPeeringReconciler) 
handleCreatePeering( ctx context.Context, gcp *v1beta1.GCPVPCPeering, l logr.Logger, @@ -102,7 +102,7 @@ func (r *GCPVPCPeeringReconciler) handleCreateCluster( "network name", gcp.Spec.PeerVPCNetworkName, ) - gcpStatus, err := r.API.CreatePeering(instaclustr.GCPPeeringEndpoint, &gcp.Spec) + gcpStatus, err := r.API.CreateGCPVPCPeering(&gcp.Spec, gcp.Status.CDCID) if err != nil { l.Error( err, "Cannot create GCP VPC Peering resource", @@ -124,6 +124,7 @@ func (r *GCPVPCPeeringReconciler) handleCreateCluster( patch := gcp.NewPatch() gcp.Status.PeeringStatus = *gcpStatus + gcp.Status.ResourceState = models.CreatedEvent err = r.Status().Patch(ctx, gcp, patch) if err != nil { l.Error(err, "Cannot patch GCP VPC Peering resource status", @@ -140,7 +141,6 @@ func (r *GCPVPCPeeringReconciler) handleCreateCluster( } controllerutil.AddFinalizer(gcp, models.DeletionFinalizer) - gcp.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, gcp, patch) if err != nil { l.Error(err, "Cannot patch GCP VPC Peering resource metadata", @@ -183,7 +183,7 @@ func (r *GCPVPCPeeringReconciler) handleCreateCluster( return ctrl.Result{}, nil } -func (r *GCPVPCPeeringReconciler) handleUpdateCluster( +func (r *GCPVPCPeeringReconciler) handleUpdatePeering( ctx context.Context, gcp *v1beta1.GCPVPCPeering, l logr.Logger, @@ -193,7 +193,7 @@ func (r *GCPVPCPeeringReconciler) handleUpdateCluster( return ctrl.Result{}, nil } -func (r *GCPVPCPeeringReconciler) handleDeleteCluster( +func (r *GCPVPCPeeringReconciler) handleDeletePeering( ctx context.Context, gcp *v1beta1.GCPVPCPeering, l logr.Logger, @@ -202,7 +202,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error( err, "Cannot get GCP VPC Peering status from the Instaclustr API", - "id", status.ID, + "id", gcp.Status.ID, "project ID", gcp.Spec.PeerProjectID, "network name", gcp.Spec.PeerVPCNetworkName, ) @@ -237,9 +237,23 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( } patch := gcp.NewPatch() + gcp.Status.ResourceState = models.DeletedEvent + err = r.Status().Patch(ctx, gcp, patch) + if err != nil { + l.Error(err, "Cannot patch GCP VPC Peering resource status", + "project ID", gcp.Spec.PeerProjectID, + "network name", gcp.Spec.PeerVPCNetworkName, + "metadata", gcp.ObjectMeta, + ) + r.EventRecorder.Eventf( + gcp, models.Warning, models.PatchFailed, + "Resource status patch is failed. Reason: %v", + err, + ) + return ctrl.Result{}, err + } r.Scheduler.RemoveJob(gcp.GetJobID(scheduler.StatusChecker)) controllerutil.RemoveFinalizer(gcp, models.DeletionFinalizer) - gcp.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, gcp, patch) if err != nil { l.Error(err, "Cannot patch GCP VPC Peering resource metadata", @@ -260,7 +274,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( "id", gcp.Status.ID, "project ID", gcp.Spec.PeerProjectID, "network name", gcp.Spec.PeerVPCNetworkName, - "data centre ID", gcp.Spec.DataCentreID, + "data centre ID", gcp.Status.CDCID, "status", gcp.Status.PeeringStatus, ) @@ -356,21 +370,17 @@ func (r *GCPVPCPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: ratelimiter.NewItemExponentialFailureRateLimiterWithMaxTries(ratelimiter.DefaultBaseDelay, ratelimiter.DefaultMaxDelay)}). 
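The rewritten UpdateFunc predicates in the peering and firewall-rule controllers follow the same admission rules; the helper below is a condensed, illustrative restatement of that logic. It returns a flag instead of mutating the object, and assumes ResourceState and the models event constants are plain strings, as the comparisons in these predicates suggest:

// admitUpdate mirrors the shared predicate logic introduced in this diff.
func admitUpdate(oldState, newState string, generationChanged bool) (admit, markUpdating bool) {
	switch {
	case oldState == "" && newState == models.CreatingEvent:
		return true, false // resource was just attached to a cluster: reconcile the creation
	case newState == models.DeletingEvent:
		return true, false // detach or deletion was requested: reconcile the removal
	case !generationChanged:
		return false, false // status-only churn: nothing to do
	default:
		return true, true // spec changed: mark the resource as updating and reconcile
	}
}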
For(&v1beta1.GCPVPCPeering{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}) - if event.Object.GetDeletionTimestamp() != nil { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.DeletingEvent}) - } return true }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.GCPVPCPeering) - if newObj.DeletionTimestamp != nil { - newObj.Annotations[models.ResourceStateAnnotation] = models.DeletingEvent + oldObj := event.ObjectOld.(*v1beta1.GCPVPCPeering) + + if oldObj.Status.ResourceState == "" && newObj.Status.ResourceState == models.CreatingEvent { return true } - if newObj.Status.ID == "" { - newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + if newObj.Status.ResourceState == models.DeletingEvent { return true } @@ -378,15 +388,11 @@ func (r *GCPVPCPeeringReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } - newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent + newObj.Status.ResourceState = models.UpdatingEvent return true }, DeleteFunc: func(event event.DeleteEvent) bool { return false }, - GenericFunc: func(event event.GenericEvent) bool { - event.Object.SetAnnotations(map[string]string{models.ResourceStateAnnotation: models.GenericEvent}) - return true - }, })).Complete(r) } diff --git a/controllers/clusterresources/gcpvpcpeering_controller_test.go b/controllers/clusterresources/gcpvpcpeering_controller_test.go index bcc3c116b..4bd64ae0d 100644 --- a/controllers/clusterresources/gcpvpcpeering_controller_test.go +++ b/controllers/clusterresources/gcpvpcpeering_controller_test.go @@ -33,8 +33,7 @@ var _ = Describe("Successful creation of a GCP VPC Peering resource", func() { Context("When setting up a GCP VPC Peering CRD", func() { gcpVPCPeeringSpec := v1beta1.GCPVPCPeeringSpec{ VPCPeeringSpec: v1beta1.VPCPeeringSpec{ - DataCentreID: "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2", - PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, + PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"}, }, PeerProjectID: "pid-132313", PeerVPCNetworkName: "vpc-123123123", @@ -55,6 +54,11 @@ var _ = Describe("Successful creation of a GCP VPC Peering resource", func() { It("Should create a GCP VPC Peering resources", func() { Expect(k8sClient.Create(ctx, &resource)).Should(Succeed()) + patch := resource.NewPatch() + resource.Status.CDCID = "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2" + resource.Status.ResourceState = models.CreatingEvent + Expect(k8sClient.Status().Patch(ctx, &resource, patch)).Should(Succeed()) + By("Sending GCP VPC Peering Specification to Instaclustr API v2") var gcpVNetPeering v1beta1.GCPVPCPeering Eventually(func() bool { diff --git a/controllers/clusterresources/helpers.go b/controllers/clusterresources/helpers.go index 799ae9bae..12315255f 100644 --- a/controllers/clusterresources/helpers.go +++ b/controllers/clusterresources/helpers.go @@ -21,6 +21,7 @@ import ( k8sCore "k8s.io/api/core/v1" "k8s.io/utils/strings/slices" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/instaclustr/operator/apis/clusterresources/v1beta1" "github.com/instaclustr/operator/pkg/instaclustr" @@ -119,3 +120,10 @@ func subnetsEqual(subnets1, subnets2 []string) bool { return true } + +type Object interface { + client.Object + NewPatch() client.Patch + AttachToCluster(id string) + DetachFromCluster() +} diff --git 
a/controllers/clusterresources/postgresqluser_controller.go b/controllers/clusterresources/postgresqluser_controller.go index 71ac7aaa0..0727b77d6 100644 --- a/controllers/clusterresources/postgresqluser_controller.go +++ b/controllers/clusterresources/postgresqluser_controller.go @@ -414,18 +414,22 @@ func (r *PostgreSQLUserReconciler) createPostgreSQLFirewallRule( ObjectMeta: ctrl.ObjectMeta{ Name: firewallRuleName, Namespace: ns, - Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, + Annotations: map[string]string{}, Labels: map[string]string{models.ClusterIDLabel: clusterID}, Finalizers: []string{models.DeletionFinalizer}, }, Spec: clusterresourcesv1beta1.ClusterNetworkFirewallRuleSpec{ FirewallRuleSpec: clusterresourcesv1beta1.FirewallRuleSpec{ - ClusterID: clusterID, - Type: models.PgAppType, + Type: models.PgAppType, }, Network: fmt.Sprintf("%s/%s", nodeAddress, "32"), }, - Status: clusterresourcesv1beta1.ClusterNetworkFirewallRuleStatus{}, + Status: clusterresourcesv1beta1.ClusterNetworkFirewallRuleStatus{ + FirewallRuleStatus: clusterresourcesv1beta1.FirewallRuleStatus{ + ClusterID: clusterID, + ResourceState: models.CreatingEvent, + }, + }, } err = r.Create(ctx, firewallRule) diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index a22f993dd..36bb58726 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -363,6 +363,67 @@ func (r *CadenceReconciler) HandleUpdateCluster( return ctrl.Result{}, nil } +func (r *CadenceReconciler) handleClusterResourcesEvents( + newObj *v1beta1.Cadence, + oldObjSpec *v1beta1.CadenceSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterbackupRef, oldObjSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, 
newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *CadenceReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, cadence *v1beta1.Cadence) { + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, cadence, cadence.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *CadenceReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, cadence *v1beta1.Cadence, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(cadence, models.Warning, models.DeletingEvent, + "Cannot detach resource. 
Reason: %v", err) + } + } +} + func (r *CadenceReconciler) handleExternalChanges(cadence, iCadence *v1beta1.Cadence, l logr.Logger) (ctrl.Result, error) { if !cadence.Spec.AreDCsEqual(iCadence.Spec.DataCentres) { l.Info(msgExternalChanges, @@ -420,6 +481,8 @@ func (r *CadenceReconciler) HandleDeleteCluster( return ctrl.Result{}, err } + r.DetachClusterresourcesFromCluster(ctx, logger, cadence) + if !errors.Is(err, instaclustr.NotFound) { logger.Info("Sending cluster deletion to the Instaclustr API", "cluster name", cadence.Spec.Name, @@ -1202,6 +1265,8 @@ func (r *CadenceReconciler) SetupWithManager(mgr ctrl.Manager) error { return true } + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + if oldObj.Generation == newObj.Generation { return false } diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index c69128726..52054102b 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -424,6 +424,67 @@ func (r *CassandraReconciler) handleUpdateCluster( return models.ExitReconcile, nil } +func (r *CassandraReconciler) handleClusterResourcesEvents( + newObj *v1beta1.Cassandra, + oldObjSpec *v1beta1.CassandraSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterbackupRef, oldObjSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + 
CannotHandleUserEvent, err) + } +} + +func (r *CassandraReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, cassandra *v1beta1.Cassandra) { + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, cassandra, cassandra.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *CassandraReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, cassandra *v1beta1.Cassandra, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(cassandra, models.Warning, models.DeletingEvent, + "Cannot detach resource. Reason: %v", err) + } + } +} + func (r *CassandraReconciler) handleExternalChanges(cassandra, iCassandra *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) { if !cassandra.Spec.IsEqual(iCassandra.Spec) { l.Info(msgSpecStillNoMatch, @@ -575,6 +636,8 @@ func (r *CassandraReconciler) handleDeleteCluster( return reconcile.Result{}, err } + r.DetachClusterresourcesFromCluster(ctx, l, cassandra) + controllerutil.RemoveFinalizer(cassandra, models.DeletionFinalizer) cassandra.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, cassandra, patch) @@ -1071,6 +1134,10 @@ func (r *CassandraReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } + oldObj := event.ObjectOld.(*v1beta1.Cassandra) + + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent return true }, diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go index 6ee2bb09f..44917262d 100644 --- a/controllers/clusters/helpers.go +++ b/controllers/clusters/helpers.go @@ -22,6 +22,7 @@ import ( "fmt" "sort" + "github.com/go-logr/logr" "github.com/hashicorp/go-version" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -30,10 +31,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" + clusterresourcesv1beta1 "github.com/instaclustr/operator/apis/clusterresources/v1beta1" "github.com/instaclustr/operator/apis/clusters/v1beta1" + "github.com/instaclustr/operator/controllers/clusterresources" "github.com/instaclustr/operator/pkg/models" ) +const CannotHandleUserEvent = "Cannot handle resource event. Reason: %v" + // confirmDeletion confirms if resource is deleting and set appropriate annotation. 
func confirmDeletion(obj client.Object) bool { annots := obj.GetAnnotations() @@ -175,6 +180,21 @@ func createSpecDifferenceMessage(k8sSpec, iSpec any) (string, error) { return msg + specDifference, nil } +func isClusterResourceRefExists(ref *v1beta1.ClusterResourceRef, compareRefs []*v1beta1.ClusterResourceRef) bool { + var exist bool + for _, compareRef := range compareRefs { + if *ref == *compareRef { + exist = true + break + } + } + + if exist { + return exist + } + return false +} + var msgDeleteClusterWithTwoFactorDelete = "Please confirm cluster deletion via email or phone. " + "If you have canceled a cluster deletion and want to put the cluster on deletion again, " + "remove \"triggered\" from Instaclustr.com/clusterDeletion annotation." @@ -222,3 +242,182 @@ type Object interface { client.Object NewPatch() client.Patch } + +func HandleCreateResource( + r client.Client, + ctx context.Context, + l logr.Logger, + kind string, + ref *v1beta1.ClusterResourceRef, + clusterID string, + CDCs []*v1beta1.DataCentreStatus, +) error { + req := types.NamespacedName{ + Namespace: ref.Namespace, + Name: ref.Name, + } + + var resource clusterresources.Object + var isCDC bool + cdcID := CDCs[0].ID + + switch kind { + case models.ClusterbackupRef: + resource = &clusterresourcesv1beta1.ClusterBackup{} + case models.ClusterNetworkFirewallRuleRef: + resource = &clusterresourcesv1beta1.ClusterNetworkFirewallRule{} + case models.AWSVPCPeeringRef: + resource = &clusterresourcesv1beta1.AWSVPCPeering{} + isCDC = true + case models.AWSSecurityGroupFirewallRuleRef: + resource = &clusterresourcesv1beta1.AWSSecurityGroupFirewallRule{} + case models.ExclusionWindowRef: + resource = &clusterresourcesv1beta1.ExclusionWindow{} + case models.GCPVPCPeeringRef: + resource = &clusterresourcesv1beta1.GCPVPCPeering{} + isCDC = true + case models.AzureVNetPeeringRef: + resource = &clusterresourcesv1beta1.AzureVNetPeering{} + isCDC = true + default: + l.Info("Provided reference to resource that is not supported", "kind", kind) + return nil + } + + if isCDC && ref.DataCentreName != "" { + for _, cdc := range CDCs { + if cdc.Name == ref.DataCentreName { + cdcID = cdc.ID + break + } + } + } + err := r.Get(ctx, req, resource) + if err != nil { + if k8serrors.IsNotFound(err) { + l.Error(err, "Provided resource is not found", "request", req) + return err + } + l.Error(err, "Cannot get cluster resource", "request", req) + return err + } + + patch := resource.NewPatch() + + if isCDC { + resource.AttachToCluster(cdcID) + } else { + resource.AttachToCluster(clusterID) + } + + err = r.Status().Patch(ctx, resource, patch) + if err != nil { + return err + } + + l.Info("PostgreSQL clusterresource was patched", + "Reference", ref, + "Resource Kind", kind, + "Event", models.CreatingEvent, + ) + + return nil +} + +func HandleDeleteResource( + r client.Client, + ctx context.Context, + l logr.Logger, + kind string, + ref *v1beta1.ClusterResourceRef, +) error { + req := types.NamespacedName{ + Namespace: ref.Namespace, + Name: ref.Name, + } + + var resource clusterresources.Object + + switch kind { + case models.ClusterNetworkFirewallRuleRef: + resource = &clusterresourcesv1beta1.ClusterNetworkFirewallRule{} + case models.AWSVPCPeeringRef: + resource = &clusterresourcesv1beta1.AWSVPCPeering{} + case models.AWSSecurityGroupFirewallRuleRef: + resource = &clusterresourcesv1beta1.AWSSecurityGroupFirewallRule{} + case models.ExclusionWindowRef: + resource = &clusterresourcesv1beta1.ExclusionWindow{} + case models.GCPVPCPeeringRef: + resource = 
&clusterresourcesv1beta1.GCPVPCPeering{} + case models.AzureVNetPeeringRef: + resource = &clusterresourcesv1beta1.AzureVNetPeering{} + default: + l.Info("Provided reference to resource that is not support deletion", "kind", kind) + return nil + } + + err := r.Get(ctx, req, resource) + if err != nil { + if k8serrors.IsNotFound(err) { + l.Error(err, "Cannot get a cluster resource. The resource is not found", "request", req) + return err + } + l.Error(err, "Cannot get cluster resource", "request", req) + return err + } + + patch := resource.NewPatch() + + resource.DetachFromCluster() + + err = r.Status().Patch(ctx, resource, patch) + if err != nil { + return err + } + + l.Info("PostgreSQL clusterresource was updated", + "Reference", ref, + "Resource Kind", kind, + "Event", models.DeletingEvent, + ) + + return nil +} + +func HandleResourceEvent( + r client.Client, + resourceKind string, + oldRefs, newRefs []*v1beta1.ClusterResourceRef, + clusterID string, + CDCs []*v1beta1.DataCentreStatus, +) error { + ctx := context.TODO() + l := log.FromContext(ctx) + + for _, ref := range newRefs { + exist := isClusterResourceRefExists(ref, oldRefs) + if exist { + continue + } + err := HandleCreateResource(r, ctx, l, resourceKind, ref, clusterID, CDCs) + if err != nil { + l.Error(err, "Cannot create clusterresource", "resource kind", resourceKind, "namespace and name", ref) + + return err + } + oldRefs = append(oldRefs, ref) + } + for _, oldRef := range oldRefs { + exist := isClusterResourceRefExists(oldRef, newRefs) + if exist { + continue + } + err := HandleDeleteResource(r, ctx, l, resourceKind, oldRef) + if err != nil { + l.Error(err, "Cannot delete clusterresource", "resource kind", resourceKind, "namespace and name", oldRef) + + return err + } + } + return nil +} diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go index d076e73c0..31ec9a743 100644 --- a/controllers/clusters/kafka_controller.go +++ b/controllers/clusters/kafka_controller.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/instaclustr/operator/apis/clusters/v1beta1" - clusterresourcesv1beta1 "github.com/instaclustr/operator/apis/kafkamanagement/v1beta1" + kafkamanagementv1beta1 "github.com/instaclustr/operator/apis/kafkamanagement/v1beta1" "github.com/instaclustr/operator/pkg/exposeservice" "github.com/instaclustr/operator/pkg/instaclustr" "github.com/instaclustr/operator/pkg/models" @@ -322,6 +322,62 @@ func (r *KafkaReconciler) handleUpdateCluster( return models.ExitReconcile, nil } +func (r *KafkaReconciler) handleClusterResourcesEvents( + newObj *v1beta1.Kafka, + oldObjSpec *v1beta1.KafkaSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, 
newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *KafkaReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, kafka *v1beta1.Kafka) { + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, kafka, kafka.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *KafkaReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, kafka *v1beta1.Kafka, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(kafka, models.Warning, models.DeletingEvent, + "Cannot detach resource. Reason: %v", err) + } + } +} + func (r *KafkaReconciler) handleExternalChanges(k, ik *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) { if !k.Spec.IsEqual(ik.Spec) { l.Info("The k8s specification is different from Instaclustr Console. Update operations are blocked.", @@ -433,6 +489,8 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, kafka *v1beta return reconcile.Result{}, err } + r.DetachClusterresourcesFromCluster(ctx, l, kafka) + r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker)) r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator)) controllerutil.RemoveFinalizer(kafka, models.DeletionFinalizer) @@ -704,7 +762,7 @@ func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, kafka *v1bet } func (r *KafkaReconciler) NewUserResource() userObject { - return &clusterresourcesv1beta1.KafkaUser{} + return &kafkamanagementv1beta1.KafkaUser{} } // SetupWithManager sets up the controller with the Manager. 
@@ -744,6 +802,10 @@ func (r *KafkaReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } + oldObj := event.ObjectOld.(*v1beta1.Kafka) + + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent return true }, diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go index 07879d330..bae9eaad8 100644 --- a/controllers/clusters/kafkaconnect_controller.go +++ b/controllers/clusters/kafkaconnect_controller.go @@ -292,6 +292,62 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1 return models.ExitReconcile, nil } +func (r *KafkaConnectReconciler) handleClusterResourcesEvents( + newObj *v1beta1.KafkaConnect, + oldObjSpec *v1beta1.KafkaConnectSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *KafkaConnectReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, kafkaConnect *v1beta1.KafkaConnect) { + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.ExclusionWindows, 
models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, kafkaConnect, kafkaConnect.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *KafkaConnectReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, kafkaConnect *v1beta1.KafkaConnect, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(kafkaConnect, models.Warning, models.DeletingEvent, + "Cannot detach resource. Reason: %v", err) + } + } +} + func (r *KafkaConnectReconciler) handleExternalChanges(k, ik *v1beta1.KafkaConnect, l logr.Logger) (reconcile.Result, error) { if !k.Spec.IsEqual(ik.Spec) { l.Info(msgSpecStillNoMatch, @@ -347,6 +403,8 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1 patch := kc.NewPatch() + r.DetachClusterresourcesFromCluster(ctx, l, kc) + if !errors.Is(err, instaclustr.NotFound) { l.Info("Sending cluster deletion to the Instaclustr API", "cluster name", kc.Spec.Name, @@ -616,6 +674,10 @@ func (r *KafkaConnectReconciler) SetupWithManager(mgr ctrl.Manager) error { } newObj := event.ObjectNew.(*v1beta1.KafkaConnect) + oldObj := event.ObjectOld.(*v1beta1.KafkaConnect) + + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + if newObj.Generation == event.ObjectOld.GetGeneration() { return false } diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go index b34807a30..16ea2e630 100644 --- a/controllers/clusters/opensearch_controller.go +++ b/controllers/clusters/opensearch_controller.go @@ -402,6 +402,67 @@ func (r *OpenSearchReconciler) HandleUpdateCluster( return models.ExitReconcile, nil } +func (r *OpenSearchReconciler) handleClusterResourcesEvents( + newObj *v1beta1.OpenSearch, + oldObjSpec *v1beta1.OpenSearchSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterbackupRef, oldObjSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, 
oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *OpenSearchReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, openSearch *v1beta1.OpenSearch) { + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, openSearch, openSearch.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *OpenSearchReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, openSearch *v1beta1.OpenSearch, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(openSearch, models.Warning, models.DeletingEvent, + "Cannot detach resource. 
Reason: %v", err) + } + } +} + func (r *OpenSearchReconciler) handleExternalChanges(o, iO *v1beta1.OpenSearch, l logr.Logger) (reconcile.Result, error) { if !o.Spec.IsEqual(iO.Spec) { l.Info(msgSpecStillNoMatch, @@ -537,6 +598,8 @@ func (r *OpenSearchReconciler) HandleDeleteCluster( return reconcile.Result{}, err } + r.DetachClusterresourcesFromCluster(ctx, logger, o) + controllerutil.RemoveFinalizer(o, models.DeletionFinalizer) err = r.Patch(ctx, o, patch) if err != nil { @@ -991,6 +1054,10 @@ func (r *OpenSearchReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } + oldObj := event.ObjectOld.(*v1beta1.OpenSearch) + + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent return true }, diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index 046f3f383..24698f7d7 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -494,6 +494,67 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( return models.ExitReconcile, nil } +func (r *PostgreSQLReconciler) handleClusterResourcesEvents( + newObj *v1beta1.PostgreSQL, + oldObjSpec *v1beta1.PgSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterbackupRef, oldObjSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, 
models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *PostgreSQLReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, pg *v1beta1.PostgreSQL) { + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, pg, pg.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *PostgreSQLReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, pg *v1beta1.PostgreSQL, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(pg, models.Warning, models.DeletingEvent, + "Cannot detach resource. Reason: %v", err) + } + } +} + func (r *PostgreSQLReconciler) createUser( ctx context.Context, l logr.Logger, @@ -899,6 +960,8 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( } } + r.DetachClusterresourcesFromCluster(ctx, logger, pg) + controllerutil.RemoveFinalizer(pg, models.DeletionFinalizer) pg.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.patchClusterMetadata(ctx, pg, logger) @@ -1685,6 +1748,7 @@ func (r *PostgreSQLReconciler) SetupWithManager(mgr ctrl.Manager) error { oldObj := event.ObjectOld.(*v1beta1.PostgreSQL) r.handleUserEvent(newObj, oldObj.Spec.UserRefs) + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) event.ObjectNew.GetAnnotations()[models.ResourceStateAnnotation] = models.UpdatingEvent return true diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go index e11f6d03f..8192affd9 100644 --- a/controllers/clusters/redis_controller.go +++ b/controllers/clusters/redis_controller.go @@ -423,6 +423,67 @@ func (r *RedisReconciler) handleUpdateCluster( return models.ExitReconcile, nil } +func (r *RedisReconciler) handleClusterResourcesEvents( + newObj *v1beta1.Redis, + oldObjSpec *v1beta1.RedisSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterbackupRef, oldObjSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + 
CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *RedisReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, redis *v1beta1.Redis) { + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, redis, redis.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *RedisReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, redis *v1beta1.Redis, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(redis, models.Warning, models.DeletingEvent, + "Cannot detach resource. 
Reason: %v", err) + } + } +} + func (r *RedisReconciler) handleExternalChanges(redis, iRedis *v1beta1.Redis, l logr.Logger) (reconcile.Result, error) { if !redis.Spec.IsEqual(iRedis.Spec) { l.Info(msgSpecStillNoMatch, @@ -568,6 +629,8 @@ func (r *RedisReconciler) handleDeleteCluster( return reconcile.Result{}, err } + r.DetachClusterresourcesFromCluster(ctx, logger, redis) + patch := redis.NewPatch() controllerutil.RemoveFinalizer(redis, models.DeletionFinalizer) redis.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -1013,6 +1076,10 @@ func (r *RedisReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } + oldObj := event.ObjectOld.(*v1beta1.Redis) + + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + newObj.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent return true }, diff --git a/controllers/clusters/zookeeper_controller.go b/controllers/clusters/zookeeper_controller.go index 8a3791e12..2261c421f 100644 --- a/controllers/clusters/zookeeper_controller.go +++ b/controllers/clusters/zookeeper_controller.go @@ -266,6 +266,62 @@ func (r *ZookeeperReconciler) handleUpdateCluster( return models.ExitReconcile, nil } +func (r *ZookeeperReconciler) handleClusterResourcesEvents( + newObj *v1beta1.Zookeeper, + oldObjSpec *v1beta1.ZookeeperSpec, +) { + err := HandleResourceEvent(r.Client, models.ClusterNetworkFirewallRuleRef, oldObjSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSVPCPeeringRef, oldObjSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AWSSecurityGroupFirewallRuleRef, oldObjSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.ExclusionWindowRef, oldObjSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.GCPVPCPeeringRef, oldObjSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } + err = HandleResourceEvent(r.Client, models.AzureVNetPeeringRef, oldObjSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings, newObj.Status.ID, newObj.Status.DataCentres) + if err != nil { + r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, + CannotHandleUserEvent, err) + } +} + +func (r *ZookeeperReconciler) DetachClusterresourcesFromCluster(ctx context.Context, l logr.Logger, zookeeper *v1beta1.Zookeeper) { + r.DetachClusterresources(ctx, l, zookeeper, 
zookeeper.Spec.ClusterResources.ClusterNetworkFirewallRules, models.ClusterNetworkFirewallRuleRef) + r.DetachClusterresources(ctx, l, zookeeper, zookeeper.Spec.ClusterResources.AWSVPCPeerings, models.AWSVPCPeeringRef) + r.DetachClusterresources(ctx, l, zookeeper, zookeeper.Spec.ClusterResources.AWSSecurityGroupFirewallRules, models.AWSSecurityGroupFirewallRuleRef) + r.DetachClusterresources(ctx, l, zookeeper, zookeeper.Spec.ClusterResources.ExclusionWindows, models.ExclusionWindowRef) + r.DetachClusterresources(ctx, l, zookeeper, zookeeper.Spec.ClusterResources.GCPVPCPeerings, models.GCPVPCPeeringRef) + r.DetachClusterresources(ctx, l, zookeeper, zookeeper.Spec.ClusterResources.AzureVNetPeerings, models.AzureVNetPeeringRef) +} + +func (r *ZookeeperReconciler) DetachClusterresources(ctx context.Context, l logr.Logger, zookeeper *v1beta1.Zookeeper, refs []*v1beta1.ClusterResourceRef, kind string) { + for _, ref := range refs { + err := HandleDeleteResource(r.Client, ctx, l, kind, ref) + if err != nil { + l.Error(err, "Cannot detach clusterresource", "resource kind", kind, "namespace and name", ref) + r.EventRecorder.Eventf(zookeeper, models.Warning, models.DeletingEvent, + "Cannot detach resource. Reason: %v", err) + } + } +} + func (r *ZookeeperReconciler) handleExternalChanges(zook *v1beta1.Zookeeper, l logr.Logger) (reconcile.Result, error) { iData, err := r.API.GetZookeeper(zook.Status.ID) if err != nil { @@ -338,6 +394,8 @@ func (r *ZookeeperReconciler) handleDeleteCluster( patch := zook.NewPatch() + r.DetachClusterresourcesFromCluster(ctx, l, zook) + if !errors.Is(err, instaclustr.NotFound) { l.Info("Sending cluster deletion to the Instaclustr API", "cluster name", zook.Spec.Name, @@ -584,6 +642,7 @@ func (r *ZookeeperReconciler) SetupWithManager(mgr ctrl.Manager) error { }, UpdateFunc: func(event event.UpdateEvent) bool { newObj := event.ObjectNew.(*v1beta1.Zookeeper) + oldObj := event.ObjectOld.(*v1beta1.Zookeeper) if event.ObjectNew.GetAnnotations()[models.ResourceStateAnnotation] == models.DeletedEvent { return false @@ -597,6 +656,8 @@ func (r *ZookeeperReconciler) SetupWithManager(mgr ctrl.Manager) error { return true } + r.handleClusterResourcesEvents(newObj, &oldObj.Spec) + if newObj.Generation == event.ObjectOld.GetGeneration() { return false } diff --git a/pkg/instaclustr/client.go b/pkg/instaclustr/client.go index 61e65cd61..13a956009 100644 --- a/pkg/instaclustr/client.go +++ b/pkg/instaclustr/client.go @@ -598,14 +598,117 @@ func (c *Client) GetPeeringStatus(peerID, return &peeringStatus, nil } -func (c *Client) CreatePeering(url string, peeringSpec any) (*clusterresourcesv1beta1.PeeringStatus, error) { +func (c *Client) CreateAzureVNetPeering(peeringSpec *clusterresourcesv1beta1.AzureVNetPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) { + payload := &struct { + PeerSubnets []string `json:"peerSubnets"` + PeerResourceGroup string `json:"peerResourceGroup"` + PeerSubscriptionID string `json:"peerSubscriptionId"` + PeerADObjectID string `json:"peerAdObjectId,omitempty"` + PeerVirtualNetworkName string `json:"peerVirtualNetworkName"` + CDCid string `json:"cdcId"` + }{ + PeerSubnets: peeringSpec.PeerSubnets, + PeerResourceGroup: peeringSpec.PeerResourceGroup, + PeerADObjectID: peeringSpec.PeerADObjectID, + PeerSubscriptionID: peeringSpec.PeerSubscriptionID, + PeerVirtualNetworkName: peeringSpec.PeerVirtualNetworkName, + CDCid: cdcId, + } - jsonDataCreate, err := json.Marshal(peeringSpec) + jsonDataCreate, err := json.Marshal(payload) if err != nil { 
return nil, err } - url = c.serverHostname + url + url := c.serverHostname + AzurePeeringEndpoint + resp, err := c.DoRequest(url, http.MethodPost, jsonDataCreate) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusAccepted { + return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + var creationResponse *clusterresourcesv1beta1.PeeringStatus + err = json.Unmarshal(body, &creationResponse) + if err != nil { + return nil, err + } + + return creationResponse, nil +} + +func (c *Client) CreateAWSVPCPeering(peeringSpec *clusterresourcesv1beta1.AWSVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) { + payload := &struct { + PeerSubnets []string `json:"peerSubnets"` + PeerAWSAccountID string `json:"peerAwsAccountId"` + PeerVPCID string `json:"peerVpcId"` + PeerRegion string `json:"peerRegion,omitempty"` + CDCid string `json:"cdcId"` + }{ + PeerSubnets: peeringSpec.PeerSubnets, + PeerAWSAccountID: peeringSpec.PeerAWSAccountID, + PeerVPCID: peeringSpec.PeerVPCID, + PeerRegion: peeringSpec.PeerRegion, + CDCid: cdcId, + } + + jsonDataCreate, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + url := c.serverHostname + AWSPeeringEndpoint + resp, err := c.DoRequest(url, http.MethodPost, jsonDataCreate) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusAccepted { + return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + var creationResponse *clusterresourcesv1beta1.PeeringStatus + err = json.Unmarshal(body, &creationResponse) + if err != nil { + return nil, err + } + + return creationResponse, nil +} + +func (c *Client) CreateGCPVPCPeering(peeringSpec *clusterresourcesv1beta1.GCPVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) { + payload := &struct { + PeerSubnets []string `json:"peerSubnets"` + PeerVPCNetworkName string `json:"peerVpcNetworkName"` + PeerProjectID string `json:"peerProjectId"` + CDCid string `json:"cdcId"` + }{ + PeerSubnets: peeringSpec.PeerSubnets, + PeerVPCNetworkName: peeringSpec.PeerVPCNetworkName, + PeerProjectID: peeringSpec.PeerProjectID, + CDCid: cdcId, + } + + jsonDataCreate, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + url := c.serverHostname + GCPPeeringEndpoint resp, err := c.DoRequest(url, http.MethodPost, jsonDataCreate) if err != nil { return nil, err @@ -718,16 +821,70 @@ func (c *Client) GetFirewallRuleStatus( return firewallRuleStatus, nil } -func (c *Client) CreateFirewallRule( - url string, - firewallRuleSpec any, +func (c *Client) CreateAWSSecurityGroupFirewallRule( + firewallRuleSpec *clusterresourcesv1beta1.AWSSecurityGroupFirewallRuleSpec, + clusterID string, ) (*clusterresourcesv1beta1.FirewallRuleStatus, error) { - jsonFirewallRule, err := json.Marshal(firewallRuleSpec) + payload := &struct { + SecurityGroupID string `json:"securityGroupId"` + ClusterID string `json:"clusterId,omitempty"` + Type string `json:"type"` + }{ + SecurityGroupID: firewallRuleSpec.SecurityGroupID, + ClusterID: clusterID, + Type: firewallRuleSpec.Type, + } + + jsonFirewallRule, err := json.Marshal(payload) if err != nil { return nil, err } - url = c.serverHostname + url + url := c.serverHostname + AWSSecurityGroupFirewallRuleEndpoint + resp, err := c.DoRequest(url, 
http.MethodPost, jsonFirewallRule) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusAccepted { + return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + var creationResponse *clusterresourcesv1beta1.FirewallRuleStatus + err = json.Unmarshal(body, &creationResponse) + if err != nil { + return nil, err + } + + return creationResponse, nil +} + +func (c *Client) CreateClusterNetworkFirewallRule( + firewallRuleSpec *clusterresourcesv1beta1.ClusterNetworkFirewallRuleSpec, + clusterID string, +) (*clusterresourcesv1beta1.FirewallRuleStatus, error) { + payload := &struct { + ClusterID string `json:"clusterId"` + Type string `json:"type"` + Network string `json:"network"` + }{ + ClusterID: clusterID, + Type: firewallRuleSpec.Type, + Network: firewallRuleSpec.Network, + } + + jsonFirewallRule, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + url := c.serverHostname + ClusterNetworkFirewallRuleEndpoint resp, err := c.DoRequest(url, http.MethodPost, jsonFirewallRule) if err != nil { return nil, err diff --git a/pkg/instaclustr/interfaces.go b/pkg/instaclustr/interfaces.go index e72383756..cb5187943 100644 --- a/pkg/instaclustr/interfaces.go +++ b/pkg/instaclustr/interfaces.go @@ -37,9 +37,12 @@ type API interface { GetAWSVPCPeering(peerID string) (*models.AWSVPCPeering, error) UpdatePeering(peerID, peeringEndpoint string, peerSpec any) error DeletePeering(peerID, peeringEndpoint string) error - CreatePeering(url string, peeringSpec any) (*clusterresourcesv1beta1.PeeringStatus, error) + CreateAzureVNetPeering(peeringSpec *clusterresourcesv1beta1.AzureVNetPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) + CreateGCPVPCPeering(peeringSpec *clusterresourcesv1beta1.GCPVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) + CreateAWSVPCPeering(peeringSpec *clusterresourcesv1beta1.AWSVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) GetFirewallRuleStatus(firewallRuleID string, firewallRuleEndpoint string) (*clusterresourcesv1beta1.FirewallRuleStatus, error) - CreateFirewallRule(url string, firewallRuleSpec any) (*clusterresourcesv1beta1.FirewallRuleStatus, error) + CreateAWSSecurityGroupFirewallRule(firewallRuleSpec *clusterresourcesv1beta1.AWSSecurityGroupFirewallRuleSpec, clusterID string) (*clusterresourcesv1beta1.FirewallRuleStatus, error) + CreateClusterNetworkFirewallRule(firewallRuleSpec *clusterresourcesv1beta1.ClusterNetworkFirewallRuleSpec, clusterID string) (*clusterresourcesv1beta1.FirewallRuleStatus, error) DeleteFirewallRule(firewallRuleID string, firewallRuleEndpoint string) error CreateKafkaUser(url string, kafkaUser *models.KafkaUser) (*kafkamanagementv1beta1.KafkaUserStatus, error) UpdateKafkaUser(kafkaUserID string, kafkaUserSpec *models.KafkaUser) error diff --git a/pkg/instaclustr/mock/client.go b/pkg/instaclustr/mock/client.go index 836f25931..ea3414bd9 100644 --- a/pkg/instaclustr/mock/client.go +++ b/pkg/instaclustr/mock/client.go @@ -79,7 +79,27 @@ func (c *mockClient) DeletePeering(peerID, peeringEndpoint string) error { panic("DeletePeering: is not implemented") } -func (c *mockClient) CreatePeering(url string, peeringSpec any) (*clusterresourcesv1beta1.PeeringStatus, error) { +func (c *mockClient) CreateAzureVNetPeering(peeringSpec *clusterresourcesv1beta1.AzureVNetPeeringSpec, cdcId string) 
(*clusterresourcesv1beta1.PeeringStatus, error) { + ps := &clusterresourcesv1beta1.PeeringStatus{ + ID: StatusID, + Name: "name", + StatusCode: "statusCode", + FailureReason: "failureReason", + } + return ps, nil +} + +func (c *mockClient) CreateAWSVPCPeering(peeringSpec *clusterresourcesv1beta1.AWSVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) { + ps := &clusterresourcesv1beta1.PeeringStatus{ + ID: StatusID, + Name: "name", + StatusCode: "statusCode", + FailureReason: "failureReason", + } + return ps, nil +} + +func (c *mockClient) CreateGCPVPCPeering(peeringSpec *clusterresourcesv1beta1.GCPVPCPeeringSpec, cdcId string) (*clusterresourcesv1beta1.PeeringStatus, error) { ps := &clusterresourcesv1beta1.PeeringStatus{ ID: StatusID, Name: "name", @@ -98,7 +118,15 @@ func (c *mockClient) GetFirewallRuleStatus(firewallRuleID string, firewallRuleEn return fwRule, nil } -func (c *mockClient) CreateFirewallRule(url string, firewallRuleSpec any) (*clusterresourcesv1beta1.FirewallRuleStatus, error) { +func (c *mockClient) CreateAWSSecurityGroupFirewallRule(firewallRuleSpec *clusterresourcesv1beta1.AWSSecurityGroupFirewallRuleSpec, clusterID string) (*clusterresourcesv1beta1.FirewallRuleStatus, error) { + fwRule := &clusterresourcesv1beta1.FirewallRuleStatus{ + ID: StatusID, + Status: "OK", + DeferredReason: "NO", + } + return fwRule, nil +} +func (c *mockClient) CreateClusterNetworkFirewallRule(firewallRuleSpec *clusterresourcesv1beta1.ClusterNetworkFirewallRuleSpec, clusterID string) (*clusterresourcesv1beta1.FirewallRuleStatus, error) { fwRule := &clusterresourcesv1beta1.FirewallRuleStatus{ ID: StatusID, Status: "OK", diff --git a/pkg/models/operator.go b/pkg/models/operator.go index 3b46ad639..b088f154a 100644 --- a/pkg/models/operator.go +++ b/pkg/models/operator.go @@ -106,6 +106,14 @@ const ( CassandraAppType = "APACHE_CASSANDRA" SparkAppType = "SPARK" + ClusterbackupRef = "ClusterBackup" + ClusterNetworkFirewallRuleRef = "ClusterNetworkFirewallRule" + AWSVPCPeeringRef = "AWSVPCPeering" + AWSSecurityGroupFirewallRuleRef = "AWSSecurityGroupFirewallRule" + ExclusionWindowRef = "ExclusionWindow" + GCPVPCPeeringRef = "GCPVPCPeering" + AzureVNetPeeringRef = "AzureVNetPeering" + DefaultPgUsernameValue = "icpostgresql" DefaultPgDbNameValue = "postgres" DefaultPgDbPortValue = 5432
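The sketches below are illustrative Go snippets, not part of the patch, showing how the pieces above fit together. First, how a GCPVPCPeering is now created without spec.dataCentreID and bound to a cluster data centre through its status, mirroring the updated envtest case (the object name, namespace, and helper name are placeholders; k8sClient and ctx are assumed to come from the test suite):

package clusterresources_test

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterresourcesv1beta1 "github.com/instaclustr/operator/apis/clusterresources/v1beta1"
	"github.com/instaclustr/operator/pkg/models"
)

// createAndAttachGCPVPCPeering is an illustrative helper: the spec no longer
// carries a data centre ID; the CDC ID and lifecycle state live in the status,
// where AttachToCluster (or, in tests, a direct status patch) puts them.
func createAndAttachGCPVPCPeering(ctx context.Context, k8sClient client.Client, cdcID string) error {
	peering := &clusterresourcesv1beta1.GCPVPCPeering{
		ObjectMeta: metav1.ObjectMeta{Name: "gcp-peering-sketch", Namespace: "default"},
		Spec: clusterresourcesv1beta1.GCPVPCPeeringSpec{
			VPCPeeringSpec: clusterresourcesv1beta1.VPCPeeringSpec{
				PeerSubnets: []string{"172.31.0.0/16", "192.168.0.0/16"},
			},
			PeerProjectID:      "pid-132313",
			PeerVPCNetworkName: "vpc-123123123",
		},
	}
	if err := k8sClient.Create(ctx, peering); err != nil {
		return err
	}

	// Bind the peering to a cluster data centre through the status subresource.
	patch := peering.NewPatch()
	peering.Status.CDCID = cdcID
	peering.Status.ResourceState = models.CreatingEvent
	return k8sClient.Status().Patch(ctx, peering, patch)
}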
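The generic helpers in controllers/clusterresources/helpers.go and controllers/clusters/helpers.go lean on the new clusterresources.Object interface. A compile-time sketch of the kinds that HandleCreateResource and HandleDeleteResource switch over, assuming every listed kind received the same AttachToCluster/DetachFromCluster methods that the patch shows for the AWS and Azure types:

package sketches

import (
	clusterresourcesv1beta1 "github.com/instaclustr/operator/apis/clusterresources/v1beta1"
	"github.com/instaclustr/operator/controllers/clusterresources"
)

// Compile-time assertions: each kind handled by the generic attach/detach helpers
// must satisfy clusterresources.Object (client.Object + NewPatch + AttachToCluster/DetachFromCluster).
var (
	_ clusterresources.Object = &clusterresourcesv1beta1.ClusterBackup{}
	_ clusterresources.Object = &clusterresourcesv1beta1.ClusterNetworkFirewallRule{}
	_ clusterresources.Object = &clusterresourcesv1beta1.AWSSecurityGroupFirewallRule{}
	_ clusterresources.Object = &clusterresourcesv1beta1.AWSVPCPeering{}
	_ clusterresources.Object = &clusterresourcesv1beta1.ExclusionWindow{}
	_ clusterresources.Object = &clusterresourcesv1beta1.GCPVPCPeering{}
	_ clusterresources.Object = &clusterresourcesv1beta1.AzureVNetPeering{}
)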
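Each cluster controller repeats the same per-kind HandleResourceEvent blocks in its handleClusterResourcesEvents. A compact, table-driven sketch of the same dispatch, written as if it lived in controllers/clusters (the method name is illustrative, using Cassandra as the example):

package clusters

import (
	"github.com/instaclustr/operator/apis/clusters/v1beta1"
	"github.com/instaclustr/operator/pkg/models"
)

// handleClusterResourcesEventsCompact folds the per-kind blocks into one loop
// over (kind, old refs, new refs) tuples.
func (r *CassandraReconciler) handleClusterResourcesEventsCompact(newObj *v1beta1.Cassandra, oldSpec *v1beta1.CassandraSpec) {
	type kindRefs struct {
		kind    string
		oldRefs []*v1beta1.ClusterResourceRef
		newRefs []*v1beta1.ClusterResourceRef
	}
	pairs := []kindRefs{
		{models.ClusterbackupRef, oldSpec.ClusterResources.ClusterBackups, newObj.Spec.ClusterResources.ClusterBackups},
		{models.ClusterNetworkFirewallRuleRef, oldSpec.ClusterResources.ClusterNetworkFirewallRules, newObj.Spec.ClusterResources.ClusterNetworkFirewallRules},
		{models.AWSVPCPeeringRef, oldSpec.ClusterResources.AWSVPCPeerings, newObj.Spec.ClusterResources.AWSVPCPeerings},
		{models.AWSSecurityGroupFirewallRuleRef, oldSpec.ClusterResources.AWSSecurityGroupFirewallRules, newObj.Spec.ClusterResources.AWSSecurityGroupFirewallRules},
		{models.ExclusionWindowRef, oldSpec.ClusterResources.ExclusionWindows, newObj.Spec.ClusterResources.ExclusionWindows},
		{models.GCPVPCPeeringRef, oldSpec.ClusterResources.GCPVPCPeerings, newObj.Spec.ClusterResources.GCPVPCPeerings},
		{models.AzureVNetPeeringRef, oldSpec.ClusterResources.AzureVNetPeerings, newObj.Spec.ClusterResources.AzureVNetPeerings},
	}
	for _, p := range pairs {
		err := HandleResourceEvent(r.Client, p.kind, p.oldRefs, p.newRefs, newObj.Status.ID, newObj.Status.DataCentres)
		if err != nil {
			r.EventRecorder.Eventf(newObj, models.Warning, models.CreatingEvent, CannotHandleUserEvent, err)
		}
	}
}

The same table shape would work for the other reconcilers, since they differ only in which reference lists their specs expose.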
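HandleResourceEvent boils down to a set difference over ClusterResourceRef values: refs present only in the new spec are attached, refs present only in the old spec are detached. A standalone sketch of that diff (refsDiff and the inline contains closure are illustrative; isClusterResourceRefExists performs the same membership test and can simply return its loop result directly):

package clusters

import "github.com/instaclustr/operator/apis/clusters/v1beta1"

// refsDiff returns the refs to attach (added) and detach (removed) given the old
// and new ClusterResources reference lists of a cluster spec.
func refsDiff(oldRefs, newRefs []*v1beta1.ClusterResourceRef) (added, removed []*v1beta1.ClusterResourceRef) {
	contains := func(ref *v1beta1.ClusterResourceRef, list []*v1beta1.ClusterResourceRef) bool {
		for _, candidate := range list {
			if *ref == *candidate {
				return true
			}
		}
		return false
	}

	for _, ref := range newRefs {
		if !contains(ref, oldRefs) {
			added = append(added, ref) // handled by HandleCreateResource -> AttachToCluster
		}
	}
	for _, ref := range oldRefs {
		if !contains(ref, newRefs) {
			removed = append(removed, ref) // handled by HandleDeleteResource -> DetachFromCluster
		}
	}
	return added, removed
}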
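For the peering kinds, HandleCreateResource resolves the data centre ID from the cluster status: it starts from the first CDC and overrides it when the reference names a specific data centre. A sketch of that lookup with an explicit guard (resolveCDCID and its error path are illustrative additions; the helper in the patch assumes the status already lists at least one data centre):

package clusters

import (
	"fmt"

	"github.com/instaclustr/operator/apis/clusters/v1beta1"
)

// resolveCDCID picks the CDC ID a peering resource gets attached to.
func resolveCDCID(CDCs []*v1beta1.DataCentreStatus, dataCentreName string) (string, error) {
	if len(CDCs) == 0 {
		return "", fmt.Errorf("cluster status reports no data centres yet")
	}

	// Default to the first data centre, as HandleCreateResource does.
	cdcID := CDCs[0].ID

	// A non-empty DataCentreName on the reference selects a specific CDC.
	if dataCentreName != "" {
		for _, cdc := range CDCs {
			if cdc.Name == dataCentreName {
				cdcID = cdc.ID
				break
			}
		}
	}

	return cdcID, nil
}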
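On the API-client side, the untyped CreatePeering(url, spec) call is replaced with per-provider methods that take the CDC ID separately. A runnable sketch of the request body CreateGCPVPCPeering builds, with placeholder values taken from the test fixtures above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same anonymous payload shape as CreateGCPVPCPeering, with cdcId supplied
	// from the resource status rather than from a spec.dataCentreID field.
	payload := struct {
		PeerSubnets        []string `json:"peerSubnets"`
		PeerVPCNetworkName string   `json:"peerVpcNetworkName"`
		PeerProjectID      string   `json:"peerProjectId"`
		CDCid              string   `json:"cdcId"`
	}{
		PeerSubnets:        []string{"172.31.0.0/16", "192.168.0.0/16"},
		PeerVPCNetworkName: "vpc-123123123",
		PeerProjectID:      "pid-132313",
		CDCid:              "375e4d1c-2f77-4d02-a6f2-1af617ff2ab2",
	}

	body, _ := json.Marshal(payload)
	// Prints: {"peerSubnets":["172.31.0.0/16","192.168.0.0/16"],"peerVpcNetworkName":"vpc-123123123","peerProjectId":"pid-132313","cdcId":"375e4d1c-2f77-4d02-a6f2-1af617ff2ab2"}
	fmt.Println(string(body))
}

CreateAWSVPCPeering and CreateAzureVNetPeering follow the same pattern with their provider-specific fields plus cdcId.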
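The firewall-rule creation methods follow the same pattern, with the cluster ID supplied from the rule's status (set via AttachToCluster) rather than its spec. A minimal sketch of the ClusterNetworkFirewallRule payload (the helper name is illustrative):

package sketches

import "encoding/json"

// clusterNetworkFirewallRulePayload mirrors the body CreateClusterNetworkFirewallRule
// marshals before POSTing to ClusterNetworkFirewallRuleEndpoint.
func clusterNetworkFirewallRulePayload(clusterID, ruleType, network string) ([]byte, error) {
	payload := struct {
		ClusterID string `json:"clusterId"`
		Type      string `json:"type"`
		Network   string `json:"network"`
	}{
		ClusterID: clusterID,
		Type:      ruleType,
		Network:   network,
	}
	return json.Marshal(payload)
}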