feat: volume snapshot backup for pg clusters (#570)
* feat: volume snapshot backup for pg clusters

* fix: lint errors
wai-wong-edb authored Aug 5, 2024
1 parent b48ae41 commit ece83ea
Showing 10 changed files with 61 additions and 42 deletions.
2 changes: 2 additions & 0 deletions docs/resources/cluster.md
@@ -94,6 +94,7 @@ resource "biganimal_cluster" "single_node_cluster" {
pgvector = false
post_gis = false
pg_bouncer = {
is_enabled = false
# settings = [ # If is_enabled is true, remove the comment and enter the settings, should you prefer something different from the defaults.
@@ -275,6 +276,7 @@ output "faraway_replica_ids" {
- `storage` (Block, Optional) Storage. (see [below for nested schema](#nestedblock--storage))
- `superuser_access` (Boolean) Enable to grant superuser access to the edb_admin role.
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
- `volume_snapshot_backup` (Boolean) Enable to take a snapshot of the volume.

### Read-Only

4 changes: 4 additions & 0 deletions examples/data-sources/biganimal_cluster/data-source.tf
@@ -112,3 +112,7 @@ output "pe_allowed_principal_ids" {
output "service_account_ids" {
value = data.biganimal_cluster.this.service_account_ids
}

output "volume_snapshot_backup" {
value = coalesce(data.biganimal_cluster.this.volume_snapshot_backup, false)
}
19 changes: 10 additions & 9 deletions examples/resources/biganimal_cluster/ha/resource.tf
@@ -74,15 +74,16 @@ resource "biganimal_cluster" "ha_cluster" {
start_time = "03:00"
}

pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:aws" // "bah:aws" uses BigAnimal's cloud account AWS, use "aws" for your cloud account
read_only_connections = true
region = "us-east-1"
superuser_access = true
pgvector = false
post_gis = false
pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:aws" // "bah:aws" uses BigAnimal's cloud account AWS, use "aws" for your cloud account
read_only_connections = true
region = "us-east-1"
superuser_access = true
pgvector = false
post_gis = false
volume_snapshot_backup = false

pg_bouncer = {
is_enabled = false
19 changes: 10 additions & 9 deletions examples/resources/biganimal_cluster/single_node/aws/resource.tf
@@ -75,15 +75,16 @@ resource "biganimal_cluster" "single_node_cluster" {
start_time = "03:00"
}

pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:aws" // "bah:aws" uses BigAnimal's cloud account AWS, use "aws" for your cloud account
read_only_connections = false
region = "us-east-1"
superuser_access = true
pgvector = false
post_gis = false
pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:aws" // "bah:aws" uses BigAnimal's cloud account AWS, use "aws" for your cloud account
read_only_connections = false
region = "us-east-1"
superuser_access = true
pgvector = false
post_gis = false
volume_snapshot_backup = false

pg_bouncer = {
is_enabled = false
20 changes: 11 additions & 9 deletions examples/resources/biganimal_cluster/single_node/azure/resource.tf
@@ -75,15 +75,17 @@ resource "biganimal_cluster" "single_node_cluster" {
start_time = "03:00"
}

pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:azure" // "bah:azure" uses BigAnimal's cloud account Azure, use "azure" for your cloud account
read_only_connections = false
region = "eastus2"
superuser_access = true
pgvector = false
post_gis = false
pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:azure" // "bah:azure" uses BigAnimal's cloud account Azure, use "azure" for your cloud account
read_only_connections = false
region = "eastus2"
superuser_access = true
pgvector = false
post_gis = false
volume_snapshot_backup = false


pg_bouncer = {
is_enabled = false
20 changes: 11 additions & 9 deletions examples/resources/biganimal_cluster/single_node/gcp/resource.tf
@@ -75,15 +75,17 @@ resource "biganimal_cluster" "single_node_cluster" {
start_time = "03:00"
}

pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:gcp" // "bah:gpc" uses BigAnimal's cloud account Google Cloud provider, use "gcp" for your cloud account
read_only_connections = false
region = "us-east1"
superuser_access = true
pgvector = false
post_gis = false
pg_type = "epas"
pg_version = "15"
private_networking = false
cloud_provider = "bah:gcp" // "bah:gpc" uses BigAnimal's cloud account Google Cloud provider, use "gcp" for your cloud account
read_only_connections = false
region = "us-east1"
superuser_access = true
pgvector = false
post_gis = false
volume_snapshot_backup = false


pg_bouncer = {
is_enabled = false
1 change: 1 addition & 0 deletions pkg/models/cluster.go
@@ -171,6 +171,7 @@ type Cluster struct {
SuperuserAccess *bool `json:"superuserAccess,omitempty"`
Extensions *[]ClusterExtension `json:"extensions,omitempty"`
PgBouncer *PgBouncer `json:"pgBouncer,omitempty"`
VolumeSnapshot *bool `json:"volumeSnapshotBackup,omitempty"`
EncryptionKeyIdReq *string `json:"keyId,omitempty"`
EncryptionKeyResp *EncryptionKey `json:"encryptionKey,omitempty"`
PgIdentity *string `json:"pgIdentity,omitempty"`
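For reference, a minimal standalone sketch (not part of the commit) of how the new VolumeSnapshot field serializes on the wire, assuming only the struct tags visible in the diff above; the trimmed-down Cluster type here is illustrative, not the full models.Cluster.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for models.Cluster, keeping only enough fields to
// show the wire format of the new flag.
type Cluster struct {
	SuperuserAccess *bool `json:"superuserAccess,omitempty"`
	VolumeSnapshot  *bool `json:"volumeSnapshotBackup,omitempty"`
}

func main() {
	enabled := true
	c := Cluster{VolumeSnapshot: &enabled}

	payload, _ := json.Marshal(c)
	// Prints {"volumeSnapshotBackup":true}; the key is omitted entirely when the pointer is nil.
	fmt.Println(string(payload))
}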
6 changes: 0 additions & 6 deletions pkg/plan_modifier/pg_bouncer.go
@@ -89,12 +89,6 @@ func (m CustomPgBouncerModifier) PlanModifyObject(ctx context.Context, req planm
return
}

if !reqPlanIsEnabled.ValueBool() && !reqPlanSettings.IsUnknown() && len(reqPlanSettings.Elements()) == 0 {
resp.Diagnostics.AddError("if pg_bouncer.is_enabled = false then pg_bouncer.settings cannot be []", "please remove pg_bouncer.settings or set pg_bouncer.settings = null")

return
}

// if is_enabled = false and settings is null and state setting is null then use state value for unknown
if !reqPlanIsEnabled.ValueBool() &&
req.ConfigValue.Attributes()["settings"].(basetypes.SetValue).IsNull() &&
4 changes: 4 additions & 0 deletions pkg/provider/data_source_cluster.go
@@ -382,6 +382,10 @@ func (c *clusterDataSource) Schema(ctx context.Context, req datasource.SchemaReq
Optional: true,
Computed: true,
},
"volume_snapshot_backup": schema.BoolAttribute{
MarkdownDescription: "Volume snapshot.",
Optional: true,
},
},
}
}
8 changes: 8 additions & 0 deletions pkg/provider/resource_cluster.go
@@ -79,6 +79,7 @@ type ClusterResourceModel struct {
TransparentDataEncryption *TransparentDataEncryptionModel `tfsdk:"transparent_data_encryption"`
PgIdentity types.String `tfsdk:"pg_identity"`
TransparentDataEncryptionAction types.String `tfsdk:"transparent_data_encryption_action"`
VolumeSnapshot types.Bool `tfsdk:"volume_snapshot_backup"`

Timeouts timeouts.Value `tfsdk:"timeouts"`
}
@@ -440,6 +441,11 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest
Optional: true,
Computed: true,
},
"volume_snapshot_backup": schema.BoolAttribute{
MarkdownDescription: "Enable to take a snapshot of the volume.",
Optional: true,
PlanModifiers: []planmodifier.Bool{boolplanmodifier.UseStateForUnknown()},
},
"pgvector": schema.BoolAttribute{
MarkdownDescription: "Is pgvector extension enabled. Adds support for vector storage and vector similarity search to Postgres.",
Optional: true,
@@ -818,6 +824,7 @@ func readCluster(ctx context.Context, client *api.ClusterClient, tfClusterResour
tfClusterResource.PrivateNetworking = types.BoolPointerValue(responseCluster.PrivateNetworking)
tfClusterResource.SuperuserAccess = types.BoolPointerValue(responseCluster.SuperuserAccess)
tfClusterResource.PgIdentity = types.StringPointerValue(responseCluster.PgIdentity)
tfClusterResource.VolumeSnapshot = types.BoolPointerValue(responseCluster.VolumeSnapshot)

if responseCluster.EncryptionKeyResp != nil && *responseCluster.Phase != constants.PHASE_HEALTHY {
if !tfClusterResource.PgIdentity.IsNull() && tfClusterResource.PgIdentity.ValueString() != "" {
@@ -1050,6 +1057,7 @@ func (c *clusterResource) generateGenericClusterModel(ctx context.Context, clust
ReadOnlyConnections: clusterResource.ReadOnlyConnections.ValueBoolPointer(),
BackupRetentionPeriod: clusterResource.BackupRetentionPeriod.ValueStringPointer(),
SuperuserAccess: clusterResource.SuperuserAccess.ValueBoolPointer(),
VolumeSnapshot: clusterResource.VolumeSnapshot.ValueBoolPointer(),
}

cluster.Extensions = &[]models.ClusterExtension{}
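As a side note, a minimal sketch (not part of the commit) of the pointer round-trip that readCluster and generateGenericClusterModel rely on, using the terraform-plugin-framework helpers that already appear in the diff (types.BoolPointerValue and ValueBoolPointer); the example program itself is illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// API response -> Terraform state: readCluster converts the API's *bool
	// into a framework types.Bool via types.BoolPointerValue.
	fromAPI := true
	stateValue := types.BoolPointerValue(&fromAPI)
	fmt.Println(stateValue.ValueBool()) // true

	// Terraform config -> API request: ValueBoolPointer returns nil when the
	// attribute is null, so an unset volume_snapshot_backup is omitted from
	// the request body thanks to the omitempty struct tag on the model.
	var unset types.Bool
	fmt.Println(unset.ValueBoolPointer() == nil) // true
}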
