From 1d239d5ff1781d3923e6356aaa6c4d5d100e0679 Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Fri, 14 Jul 2023 22:25:25 -0500 Subject: [PATCH 01/38] add snapshot_policy to aws_fsx_ontap_volume --- internal/service/fsx/ontap_volume.go | 15 +++++++++++++++ website/docs/r/fsx_ontap_volume.html.markdown | 1 + 2 files changed, 16 insertions(+) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index f5d81e2eba9..5bd34e1bbf9 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -93,6 +93,12 @@ func ResourceOntapVolume() *schema.Resource { Optional: true, Default: false, }, + "snapshot_policy": { + Type: schema.TypeString, + Optional: true, + Default: false, + ValidateFunc: validation.StringInSlice([]string{"default", "default-1weekly", "none"}, false), + }, "storage_efficiency_enabled": { Type: schema.TypeBool, Optional: true, @@ -169,6 +175,10 @@ func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.SecurityStyle = aws.String(v.(string)) } + if v, ok := d.GetOk("snapshot_policy"); ok { + input.OntapConfiguration.SnapshotPolicy = aws.String(v.(string)) + } + if v, ok := d.GetOkExists("storage_efficiency_enabled"); ok { input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(v.(bool)) } @@ -220,6 +230,7 @@ func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("ontap_volume_type", ontapConfig.OntapVolumeType) d.Set("security_style", ontapConfig.SecurityStyle) d.Set("size_in_megabytes", ontapConfig.SizeInMegabytes) + d.Set("snapshot_policy", ontapConfig.SnapshotPolicy) d.Set("storage_efficiency_enabled", ontapConfig.StorageEfficiencyEnabled) d.Set("storage_virtual_machine_id", ontapConfig.StorageVirtualMachineId) if err := d.Set("tiering_policy", flattenOntapVolumeTieringPolicy(ontapConfig.TieringPolicy)); err != nil { @@ -254,6 +265,10 @@ func resourceOntapVolumeUpdate(ctx context.Context, d 
*schema.ResourceData, meta input.OntapConfiguration.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) } + if d.HasChange("snapshot_policy") { + input.OntapConfiguration.SnapshotPolicy = aws.String(d.Get("snapshot_policy").(string)) + } + if d.HasChange("storage_efficiency_enabled") { input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(d.Get("storage_efficiency_enabled").(bool)) } diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 61410fa873c..d8409323af9 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -54,6 +54,7 @@ The following arguments are supported: * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. * `size_in_megabytes` - (Required) Specifies the size of the volume, in megabytes (MB), that you are creating. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the volume is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. +* `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. Valid values are `default`, `default-1weekly`, and `none`. * `storage_efficiency_enabled` - (Optional) Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume. * `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. * `tags` - (Optional) A map of tags to assign to the volume. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
From 1b31de89628ec859a88e43a3c7c9d1bc5c69051c Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Sun, 16 Jul 2023 22:07:44 -0500 Subject: [PATCH 02/38] update snapshot test and attribute --- internal/service/fsx/ontap_volume.go | 4 ++-- internal/service/fsx/ontap_volume_test.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 5bd34e1bbf9..0287da528b7 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -96,8 +96,8 @@ func ResourceOntapVolume() *schema.Resource { "snapshot_policy": { Type: schema.TypeString, Optional: true, - Default: false, - ValidateFunc: validation.StringInSlice([]string{"default", "default-1weekly", "none"}, false), + Computed: true, + ValidateFunc: validation.StringLenBetween(1, 255), }, "storage_efficiency_enabled": { Type: schema.TypeBool, diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 417295c0a24..d8f3723efcc 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -44,6 +44,7 @@ func TestAccFSxOntapVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_style", ""), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", "1024"), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", "false"), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", "default"), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "storage_virtual_machine_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), From d5fa3fde66fb96f784dcf5b4e40e3b2767be3654 Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Sun, 16 Jul 2023 22:21:43 -0500 Subject: [PATCH 03/38] fsx documentation --- website/docs/r/fsx_ontap_volume.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index d8409323af9..24a958a7bf5 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -54,7 +54,7 @@ The following arguments are supported: * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. * `size_in_megabytes` - (Required) Specifies the size of the volume, in megabytes (MB), that you are creating. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the volume is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. -* `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. Valid values are `default`, `default-1weekly`, and `none`. +* `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. See [snapshot policies](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies) in the Amazon FSx ONTAP User Guide * `storage_efficiency_enabled` - (Optional) Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume. * `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. * `tags` - (Optional) A map of tags to assign to the volume. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
From 448f72b4a134d0419c591f82fa3830f157f603b9 Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Sun, 16 Jul 2023 22:31:58 -0500 Subject: [PATCH 04/38] adds pr 32530 to changelog --- .changelog/32530.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/32530.txt diff --git a/.changelog/32530.txt b/.changelog/32530.txt new file mode 100644 index 00000000000..485b3c07a7d --- /dev/null +++ b/.changelog/32530.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fsx_ontap_volume: Add `snapshot_policy` argument +``` From 91f885c5a1346de3c1446de137a75e958548ec1f Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Mon, 17 Jul 2023 17:32:28 -0500 Subject: [PATCH 05/38] adds TestAccFSxOntapVolume_snapshotPolicy --- internal/service/fsx/ontap_volume.go | 2 +- internal/service/fsx/ontap_volume_test.go | 54 +++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 0287da528b7..cc4e56764fc 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -96,7 +96,7 @@ func ResourceOntapVolume() *schema.Resource { "snapshot_policy": { Type: schema.TypeString, Optional: true, - Computed: true, + Default: "default", ValidateFunc: validation.StringLenBetween(1, 255), }, "storage_efficiency_enabled": { diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index d8f3723efcc..68fb97ac2fc 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -284,6 +284,47 @@ func TestAccFSxOntapVolume_size(t *testing.T) { }) } +func TestAccFSxOntapVolume_snapshotPolicy(t *testing.T) { + ctx := acctest.Context(t) + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + policy1 := "default" + policy2 := "none" + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy1), + Check: resource.ComposeTestCheckFunc( + testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"skip_final_backup"}, + }, + { + Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy2), + Check: resource.ComposeTestCheckFunc( + testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), + testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy2)), + ), + }, + }, + }) +} + func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume @@ -565,6 +606,19 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, securityStyle)) } +func testAccONTAPVolumeConfig_snapshotPolicy(rName string, snapshotPolicy string) string { + return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + snapshot_policy = %[2]q + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id +} +`, rName, snapshotPolicy)) +} + func testAccONTAPVolumeConfig_size(rName string, size int) string { return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), 
fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { From 3e3b420d7d766a7c9f25d1e8bd919edd6ef2119e Mon Sep 17 00:00:00 2001 From: Corey Lane Date: Mon, 17 Jul 2023 17:37:57 -0500 Subject: [PATCH 06/38] alpha testAccONTAPVolumeConfig_size --- internal/service/fsx/ontap_volume_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 68fb97ac2fc..7bacf46ef35 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -606,29 +606,29 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, securityStyle)) } -func testAccONTAPVolumeConfig_snapshotPolicy(rName string, snapshotPolicy string) string { +func testAccONTAPVolumeConfig_size(rName string, size int) string { return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" - size_in_megabytes = 1024 - snapshot_policy = %[2]q + size_in_megabytes = %[2]d storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } -`, rName, snapshotPolicy)) +`, rName, size)) } -func testAccONTAPVolumeConfig_size(rName string, size int) string { +func testAccONTAPVolumeConfig_snapshotPolicy(rName string, snapshotPolicy string) string { return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" - size_in_megabytes = %[2]d + size_in_megabytes = 1024 + snapshot_policy = %[2]q storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } -`, rName, size)) +`, rName, snapshotPolicy)) } func testAccONTAPVolumeConfig_storageEfficiency(rName string, storageEfficiencyEnabled bool) string { From 0714f93ec3e78ec1535b7aa44119ef67daedba5f Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: 
Sun, 13 Aug 2023 23:53:40 +0200 Subject: [PATCH 07/38] feat: added connection_mode attribute + tests --- .../opensearch/inbound_connection_accepter.go | 6 +- .../service/opensearch/outbound_connection.go | 95 ++++++++++- .../opensearch/outbound_connection_test.go | 160 ++++++++++++++++++ 3 files changed, 253 insertions(+), 8 deletions(-) diff --git a/internal/service/opensearch/inbound_connection_accepter.go b/internal/service/opensearch/inbound_connection_accepter.go index ccb578d682a..9396b07446d 100644 --- a/internal/service/opensearch/inbound_connection_accepter.go +++ b/internal/service/opensearch/inbound_connection_accepter.go @@ -33,8 +33,8 @@ func ResourceInboundConnectionAccepter() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(1 * time.Minute), - Delete: schema.DefaultTimeout(1 * time.Minute), + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -113,7 +113,7 @@ func resourceInboundConnectionDelete(ctx context.Context, d *schema.ResourceData } if err := waitForInboundConnectionDeletion(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return diag.Errorf("waiting for VPC Peering Connection (%s) to be deleted: %s", d.Id(), err) + return diag.Errorf("waiting for Inbound Connection (%s) to be deleted: %s", d.Id(), err) } return nil diff --git a/internal/service/opensearch/outbound_connection.go b/internal/service/opensearch/outbound_connection.go index 471044ba02b..90d940f5104 100644 --- a/internal/service/opensearch/outbound_connection.go +++ b/internal/service/opensearch/outbound_connection.go @@ -16,7 +16,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) // @SDKResource("aws_opensearch_outbound_connection") @@ -30,8 +32,8 @@ func ResourceOutboundConnection() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(1 * time.Minute), - Delete: schema.DefaultTimeout(1 * time.Minute), + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -40,6 +42,42 @@ func ResourceOutboundConnection() *schema.Resource { Required: true, ForceNew: true, }, + "connection_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(opensearchservice.ConnectionMode_Values(), false), + }, + "connection_properties": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cross_cluster_search": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "skip_unavailable": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "endpoint": { + Type: schema.TypeString, + Computed: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + }, + }, + }, + }, "local_domain_info": outboundConnectionDomainInfoSchema(), "remote_domain_info": outboundConnectionDomainInfoSchema(), "connection_status": { @@ -55,9 +93,11 @@ func resourceOutboundConnectionCreate(ctx context.Context, d *schema.ResourceDat // Create the Outbound Connection createOpts := &opensearchservice.CreateOutboundConnectionInput{ - ConnectionAlias: aws.String(d.Get("connection_alias").(string)), - LocalDomainInfo: expandOutboundConnectionDomainInfo(d.Get("local_domain_info").([]interface{})), - RemoteDomainInfo: 
expandOutboundConnectionDomainInfo(d.Get("remote_domain_info").([]interface{})), + ConnectionAlias: aws.String(d.Get("connection_alias").(string)), + ConnectionMode: aws.String(d.Get("connection_mode").(string)), + ConnectionProperties: expandOutboundConnectionConnectionProperties(d.Get("connection_properties").([]interface{})), + LocalDomainInfo: expandOutboundConnectionDomainInfo(d.Get("local_domain_info").([]interface{})), + RemoteDomainInfo: expandOutboundConnectionDomainInfo(d.Get("remote_domain_info").([]interface{})), } log.Printf("[DEBUG] Outbound Connection Create options: %#v", createOpts) @@ -98,6 +138,8 @@ func resourceOutboundConnectionRead(ctx context.Context, d *schema.ResourceData, } d.Set("connection_alias", ccsc.ConnectionAlias) + d.Set("connection_mode", ccsc.ConnectionMode) + d.Set("connection_properties", flattenOutboundConnectionConnectionProperties(ccsc.ConnectionProperties)) d.Set("remote_domain_info", flattenOutboundConnectionDomainInfo(ccsc.RemoteDomainInfo)) d.Set("local_domain_info", flattenOutboundConnectionDomainInfo(ccsc.LocalDomainInfo)) d.Set("connection_status", statusCode) @@ -264,3 +306,46 @@ func flattenOutboundConnectionDomainInfo(domainInfo *opensearchservice.DomainInf "region": aws.StringValue(domainInfo.AWSDomainInformation.Region), }} } + +func expandOutboundConnectionConnectionProperties(cProperties []interface{}) *opensearchservice.ConnectionProperties { + if len(cProperties) == 0 || cProperties[0] == nil { + return nil + } + + mOptions := cProperties[0].(map[string]interface{}) + + return &opensearchservice.ConnectionProperties{ + CrossClusterSearch: expandOutboundConnectionCrossClusterSearchConnectionProperties(mOptions["cross_cluster_search"].([]interface{})), + } +} + +func flattenOutboundConnectionConnectionProperties(cProperties *opensearchservice.ConnectionProperties) []interface{} { + if cProperties == nil { + return nil + } + return []interface{}{map[string]interface{}{ + "cross_cluster_search": 
flattenOutboundConnectionCrossClusterSearchConnectionProperties(cProperties.CrossClusterSearch), + "endpoint": aws.StringValue(cProperties.Endpoint), + }} +} + +func expandOutboundConnectionCrossClusterSearchConnectionProperties(cProperties []interface{}) *opensearchservice.CrossClusterSearchConnectionProperties { + if len(cProperties) == 0 || cProperties[0] == nil { + return nil + } + + mOptions := cProperties[0].(map[string]interface{}) + + return &opensearchservice.CrossClusterSearchConnectionProperties{ + SkipUnavailable: aws.String(mOptions["skip_unavailable"].(string)), + } +} + +func flattenOutboundConnectionCrossClusterSearchConnectionProperties(cProperties *opensearchservice.CrossClusterSearchConnectionProperties) []interface{} { + if cProperties == nil { + return nil + } + return []interface{}{map[string]interface{}{ + "skip_unavailable": aws.StringValue(cProperties.SkipUnavailable), + }} +} diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index 01c9f0f5b97..04adcf9bb77 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -32,6 +32,7 @@ func TestAccOpenSearchOutboundConnection_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), + resource.TestCheckResourceAttr(resourceName, "connection_properties.#", "2"), resource.TestCheckResourceAttr(resourceName, "connection_status", "PENDING_ACCEPTANCE"), ), }, @@ -44,6 +45,36 @@ func TestAccOpenSearchOutboundConnection_basic(t *testing.T) { }) } +func TestAccOpenSearchOutboundConnection_vpc(t *testing.T) { + ctx := acctest.Context(t) + var domain opensearchservice.DomainStatus + ri := sdkacctest.RandString(10) + name := fmt.Sprintf("tf-test-%s", ri) + resourceName := 
"aws_opensearch_outbound_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, opensearchservice.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOutboundConnectionConfig_vpc(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), + testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), + resource.TestCheckResourceAttr(resourceName, "connection_properties.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccOpenSearchOutboundConnection_disappears(t *testing.T) { ctx := acctest.Context(t) var domain opensearchservice.DomainStatus @@ -151,6 +182,13 @@ data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "test" { connection_alias = "%s" + + connection_properties { + cross_cluster_search { + skip_unavailable = "ENABLED" + } + } + local_domain_info { owner_id = data.aws_caller_identity.current.account_id region = data.aws_region.current.name @@ -165,3 +203,125 @@ resource "aws_opensearch_outbound_connection" "test" { } `, name, pw, name, pw, name) } + +func testAccOutboundConnectionConfig_vpc(name string) string { + // Satisfy the pw requirements + pw := fmt.Sprintf("Aa1-%s", sdkacctest.RandString(10)) + + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "192.168.0.0/22" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = "192.168.0.0/24" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test2" { + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[1] + cidr_block = "192.168.1.0/24" + + tags = { + Name = %[1]q + } +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_security_group" "test2" { + vpc_id = aws_vpc.test.id +} + +resource "aws_opensearch_domain" "domain_1" { + domain_name = "%[1]s-1" + + cluster_config { + instance_type = "t3.small.search" # supported in both aws and aws-us-gov + } + + ebs_options { + ebs_enabled = true + volume_size = 10 + } + + node_to_node_encryption { + enabled = true + } + + advanced_security_options { + enabled = true + internal_user_database_enabled = true + + master_user_options { + master_user_name = "test" + master_user_password = %[2]q + } + } + + encrypt_at_rest { + enabled = true + } + + domain_endpoint_options { + enforce_https = true + tls_security_policy = "Policy-Min-TLS-1-2-2019-07" + } +} + +resource "aws_opensearch_domain" "domain_2" { + domain_name = "%[1]s-2" + + ebs_options { + ebs_enabled = true + volume_size = 10 + } + + cluster_config { + instance_count = 2 + zone_awareness_enabled = true + instance_type = "t3.small.search" + } + + vpc_options { + security_group_ids = [aws_security_group.test.id, aws_security_group.test2.id] + subnet_ids = [aws_subnet.test.id, aws_subnet.test2.id] + } +} + +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + +resource "aws_opensearch_outbound_connection" "test" { + connection_alias = %[1]q + connection_mode = "VPC_ENDPOINT" + + local_domain_info { + owner_id = data.aws_caller_identity.current.account_id + region = data.aws_region.current.name + domain_name = aws_opensearch_domain.domain_1.domain_name + } + + remote_domain_info { + owner_id = data.aws_caller_identity.current.account_id + region = data.aws_region.current.name + domain_name = aws_opensearch_domain.domain_2.domain_name + } +} + +`, name, pw)) +} From 82c1de484f234210846a8552fce485a24d73f225 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 
00:23:04 +0200 Subject: [PATCH 08/38] feat: changed type --- internal/service/opensearch/outbound_connection.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/internal/service/opensearch/outbound_connection.go b/internal/service/opensearch/outbound_connection.go index 90d940f5104..691187bab3f 100644 --- a/internal/service/opensearch/outbound_connection.go +++ b/internal/service/opensearch/outbound_connection.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/verify" ) // @SDKResource("aws_opensearch_outbound_connection") @@ -51,7 +50,7 @@ func ResourceOutboundConnection() *schema.Resource { "connection_properties": { Type: schema.TypeList, Optional: true, - ForceNew: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -71,9 +70,8 @@ func ResourceOutboundConnection() *schema.Resource { }, }, "endpoint": { - Type: schema.TypeString, - Computed: true, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + Type: schema.TypeString, + Computed: true, }, }, }, From 491e69a17ea45f950d9d91e82a8b4e9af1da0a6d Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 01:47:17 +0200 Subject: [PATCH 09/38] feat: added accept_connection feature --- .../opensearch/inbound_connection_accepter.go | 37 ------------------- .../service/opensearch/outbound_connection.go | 33 +++++++++++++++++ .../opensearch/outbound_connection_test.go | 13 ++++--- internal/service/opensearch/wait.go | 37 +++++++++++++++++++ 4 files changed, 77 insertions(+), 43 deletions(-) diff --git a/internal/service/opensearch/inbound_connection_accepter.go b/internal/service/opensearch/inbound_connection_accepter.go index 9396b07446d..66ba65059d9 100644 --- 
a/internal/service/opensearch/inbound_connection_accepter.go +++ b/internal/service/opensearch/inbound_connection_accepter.go @@ -5,7 +5,6 @@ package opensearch import ( "context" - "fmt" "log" "time" @@ -150,39 +149,3 @@ func inboundConnectionRefreshState(ctx context.Context, conn *opensearchservice. return ccsc, statusCode, nil } } - -func inboundConnectionWaitUntilActive(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for Inbound Connection (%s) to become available.", id) - stateConf := &retry.StateChangeConf{ - Pending: []string{ - opensearchservice.InboundConnectionStatusCodeProvisioning, - opensearchservice.InboundConnectionStatusCodeApproved, - }, - Target: []string{ - opensearchservice.InboundConnectionStatusCodeActive, - }, - Refresh: inboundConnectionRefreshState(ctx, conn, id), - Timeout: timeout, - } - if _, err := stateConf.WaitForStateContext(ctx); err != nil { - return fmt.Errorf("waiting for Inbound Connection (%s) to become available: %s", id, err) - } - return nil -} - -func waitForInboundConnectionDeletion(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - opensearchservice.InboundConnectionStatusCodeDeleting, - }, - Target: []string{ - opensearchservice.InboundConnectionStatusCodeDeleted, - }, - Refresh: inboundConnectionRefreshState(ctx, conn, id), - Timeout: timeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} diff --git a/internal/service/opensearch/outbound_connection.go b/internal/service/opensearch/outbound_connection.go index 691187bab3f..c6f52808e04 100644 --- a/internal/service/opensearch/outbound_connection.go +++ b/internal/service/opensearch/outbound_connection.go @@ -82,6 +82,12 @@ func ResourceOutboundConnection() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "accept_connection": { + 
Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, }, } } @@ -114,6 +120,12 @@ func resourceOutboundConnectionCreate(ctx context.Context, d *schema.ResourceDat return diag.Errorf("waiting for Outbound Connection to become available: %s", err) } + if d.Get("accept_connection").(bool) { + if err := inboundConnectionAccept(ctx, d, conn); err != nil { + return diag.Errorf("unable to accept Connection: %s", err) + } + } + return resourceOutboundConnectionRead(ctx, d, meta) } @@ -278,6 +290,27 @@ func outboundConnectionDomainInfoSchema() *schema.Schema { } } +func inboundConnectionAccept(ctx context.Context, d *schema.ResourceData, conn *opensearchservice.OpenSearchService) error { + // Create the Inbound Connection + acceptOpts := &opensearchservice.AcceptInboundConnectionInput{ + ConnectionId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Inbound Connection Accept options: %#v", acceptOpts) + + _, err := conn.AcceptInboundConnectionWithContext(ctx, acceptOpts) + if err != nil { + return err + } + + err = inboundConnectionWaitUntilActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return nil +} + func expandOutboundConnectionDomainInfo(vOptions []interface{}) *opensearchservice.DomainInformationContainer { if len(vOptions) == 0 || vOptions[0] == nil { return nil diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index 04adcf9bb77..dc444ff814e 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -32,7 +32,6 @@ func TestAccOpenSearchOutboundConnection_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), - resource.TestCheckResourceAttr(resourceName, "connection_properties.#", "2"), 
resource.TestCheckResourceAttr(resourceName, "connection_status", "PENDING_ACCEPTANCE"), ), }, @@ -59,11 +58,11 @@ func TestAccOpenSearchOutboundConnection_vpc(t *testing.T) { CheckDestroy: testAccCheckDomainDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccOutboundConnectionConfig_vpc(name), + Config: testAccOutboundConnectionConfig_vpcEndpoint(name), Check: resource.ComposeTestCheckFunc( testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), - resource.TestCheckResourceAttr(resourceName, "connection_properties.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "connection_properties.0.endpoint"), ), }, { @@ -182,6 +181,7 @@ data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "test" { connection_alias = "%s" + connection_mode = "DIRECT" connection_properties { cross_cluster_search { @@ -204,7 +204,7 @@ resource "aws_opensearch_outbound_connection" "test" { `, name, pw, name, pw, name) } -func testAccOutboundConnectionConfig_vpc(name string) string { +func testAccOutboundConnectionConfig_vpcEndpoint(name string) string { // Satisfy the pw requirements pw := fmt.Sprintf("Aa1-%s", sdkacctest.RandString(10)) @@ -307,8 +307,9 @@ data "aws_caller_identity" "current" {} data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "test" { - connection_alias = %[1]q - connection_mode = "VPC_ENDPOINT" + connection_alias = %[1]q + connection_mode = "VPC_ENDPOINT" + accept_connection = true local_domain_info { owner_id = data.aws_caller_identity.current.account_id diff --git a/internal/service/opensearch/wait.go b/internal/service/opensearch/wait.go index 22ed54fe460..23fff802e76 100644 --- a/internal/service/opensearch/wait.go +++ b/internal/service/opensearch/wait.go @@ -6,6 +6,7 @@ package opensearch import ( "context" "fmt" + "log" "time" "github.com/aws/aws-sdk-go/aws" @@ -156,3 +157,39 @@ func waitForDomainDelete(ctx 
context.Context, conn *opensearchservice.OpenSearch return err } + +func inboundConnectionWaitUntilActive(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for Inbound Connection (%s) to become available.", id) + stateConf := &retry.StateChangeConf{ + Pending: []string{ + opensearchservice.InboundConnectionStatusCodeProvisioning, + opensearchservice.InboundConnectionStatusCodeApproved, + }, + Target: []string{ + opensearchservice.InboundConnectionStatusCodeActive, + }, + Refresh: inboundConnectionRefreshState(ctx, conn, id), + Timeout: timeout, + } + if _, err := stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for Inbound Connection (%s) to become available: %s", id, err) + } + return nil +} + +func waitForInboundConnectionDeletion(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + opensearchservice.InboundConnectionStatusCodeDeleting, + }, + Target: []string{ + opensearchservice.InboundConnectionStatusCodeDeleted, + }, + Refresh: inboundConnectionRefreshState(ctx, conn, id), + Timeout: timeout, + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} From 7e4cb343b1d177598c56c73c8cb21f63449c2a12 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 02:47:32 +0200 Subject: [PATCH 10/38] feat: added documentation --- .../opensearch/outbound_connection_test.go | 15 +++++++++------ .../opensearch_outbound_connection.html.markdown | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index dc444ff814e..7b67571a3ad 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -36,9 +36,10 @@ func 
TestAccOpenSearchOutboundConnection_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"accept_connection"}, }, }, }) @@ -66,9 +67,10 @@ func TestAccOpenSearchOutboundConnection_vpc(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"accept_connection"}, }, }, }) @@ -182,6 +184,7 @@ data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "test" { connection_alias = "%s" connection_mode = "DIRECT" + accept_connection = true connection_properties { cross_cluster_search { diff --git a/website/docs/r/opensearch_outbound_connection.html.markdown b/website/docs/r/opensearch_outbound_connection.html.markdown index 528cc3f8240..972274d61d6 100644 --- a/website/docs/r/opensearch_outbound_connection.html.markdown +++ b/website/docs/r/opensearch_outbound_connection.html.markdown @@ -20,6 +20,7 @@ data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "foo" { connection_alias = "outbound_connection" + connection_mode = "DIRECT" local_domain_info { owner_id = data.aws_caller_identity.current.account_id region = data.aws_region.current.name @@ -39,9 +40,20 @@ resource "aws_opensearch_outbound_connection" "foo" { This resource supports the following arguments: * `connection_alias` - (Required, Forces new resource) Specifies the connection alias that will be used by the customer for this connection. +* `connection_mode` - (Required, Forces new resource) Specifies the connection mode. Accepted values are `DIRECT` or `VPC_ENDPOINT`. +* `accept_connection` - (Optional, Forces new resource) Accepts the connection. 
+* `connection_properties` - (Optional, Forces new resource) Configuration block for the outbound connection. * `local_domain_info` - (Required, Forces new resource) Configuration block for the local Opensearch domain. * `remote_domain_info` - (Required, Forces new resource) Configuration block for the remote Opensearch domain. +### connection_properties + +* `cross_cluster_search` - (Optional, Forces new resource) Configuration block for cross cluster search. + +### cross_cluster_search + +* `skip_unavailable` - (Optional, Forces new resource) Skips unavailable clusters and can only be used for cross-cluster searches. Accepted values are `ENABLED` or `DISABLED`. + ### local_domain_info * `owner_id` - (Required, Forces new resource) The Account ID of the owner of the local domain. @@ -61,6 +73,10 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The Id of the connection. * `connection_status` - Status of the connection request. +`connection_properties` block exports the following: + +* `endpoint` - The endpoint of the remote domain; it is only set when `connection_mode` is `VPC_ENDPOINT` and `accept_connection` is `TRUE`. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Opensearch Outbound Connections using the Outbound Connection ID.
For example: From a2c97ce64ca826e7db293cec7aec75991dda81ac Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 03:10:47 +0200 Subject: [PATCH 11/38] feat: changed expected connection_status in tests --- internal/service/opensearch/outbound_connection_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index 7b67571a3ad..6c8cb740fc5 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -32,7 +32,7 @@ func TestAccOpenSearchOutboundConnection_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), - resource.TestCheckResourceAttr(resourceName, "connection_status", "PENDING_ACCEPTANCE"), + resource.TestCheckResourceAttr(resourceName, "connection_status", "ACTIVE"), ), }, { @@ -64,6 +64,7 @@ func TestAccOpenSearchOutboundConnection_vpc(t *testing.T) { testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_1", &domain), testAccCheckDomainExists(ctx, "aws_opensearch_domain.domain_2", &domain), resource.TestCheckResourceAttrSet(resourceName, "connection_properties.0.endpoint"), + resource.TestCheckResourceAttr(resourceName, "connection_status", "ACTIVE"), ), }, { From 2b84f514cab90d25f438f9c0765a32d2488911ba Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 03:57:34 +0200 Subject: [PATCH 12/38] chore: added changelog --- .changelog/32990.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/32990.txt diff --git a/.changelog/32990.txt b/.changelog/32990.txt new file mode 100644 index 00000000000..2292aff4c49 --- /dev/null +++ b/.changelog/32990.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_opensearch_outbound_connection: Add 
`connection_properties`, `connection_mode` and `accept_connection` arguments to support specifying connection mode. +``` From 4beb5c45f2c37840d0902e786ea29d0496d5582d Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 04:08:15 +0200 Subject: [PATCH 13/38] chore: removed whitespaces --- internal/service/opensearch/outbound_connection.go | 5 +---- internal/service/opensearch/outbound_connection_test.go | 6 +++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/internal/service/opensearch/outbound_connection.go b/internal/service/opensearch/outbound_connection.go index c6f52808e04..06789758da3 100644 --- a/internal/service/opensearch/outbound_connection.go +++ b/internal/service/opensearch/outbound_connection.go @@ -304,11 +304,8 @@ func inboundConnectionAccept(ctx context.Context, d *schema.ResourceData, conn * } err = inboundConnectionWaitUntilActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) - if err != nil { - return err - } - return nil + return err } func expandOutboundConnectionDomainInfo(vOptions []interface{}) *opensearchservice.DomainInformationContainer { diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index 6c8cb740fc5..b01d46d8e74 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -183,8 +183,8 @@ data "aws_caller_identity" "current" {} data "aws_region" "current" {} resource "aws_opensearch_outbound_connection" "test" { - connection_alias = "%s" - connection_mode = "DIRECT" + connection_alias = "%s" + connection_mode = "DIRECT" accept_connection = true connection_properties { @@ -314,7 +314,7 @@ resource "aws_opensearch_outbound_connection" "test" { connection_alias = %[1]q connection_mode = "VPC_ENDPOINT" accept_connection = true - + local_domain_info { owner_id = data.aws_caller_identity.current.account_id region = data.aws_region.current.name From 
372c29b6911a1aeb319edf04c79a2c6f22b17a3a Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 14 Aug 2023 04:11:46 +0200 Subject: [PATCH 14/38] chore: removed whitespace --- internal/service/opensearch/outbound_connection_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/opensearch/outbound_connection_test.go b/internal/service/opensearch/outbound_connection_test.go index b01d46d8e74..9b779e781b8 100644 --- a/internal/service/opensearch/outbound_connection_test.go +++ b/internal/service/opensearch/outbound_connection_test.go @@ -186,7 +186,7 @@ resource "aws_opensearch_outbound_connection" "test" { connection_alias = "%s" connection_mode = "DIRECT" accept_connection = true - + connection_properties { cross_cluster_search { skip_unavailable = "ENABLED" From 06ee53d9fed4877dc55cefbe3ba1423b863eda61 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:08:31 -0400 Subject: [PATCH 15/38] fsx: Move functions around. --- internal/service/fsx/lustre_file_system.go | 16 +-- internal/service/fsx/ontap_volume.go | 142 +++++++++++++++++++- internal/service/fsx/openzfs_file_system.go | 50 ------- internal/service/fsx/status.go | 16 --- internal/service/fsx/wait.go | 66 --------- 5 files changed, 146 insertions(+), 144 deletions(-) diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index dc5f1784182..0a4d214e9ec 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -658,14 +658,6 @@ func logStateFunc(v interface{}) string { return value } -func findFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { - input := &fsx.DescribeFileSystemsInput{ - FileSystemIds: aws.StringSlice([]string{id}), - } - - return findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*fsx.FileSystem]()) -} - func FindLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) 
{ output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeLustre) @@ -680,6 +672,14 @@ func FindLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*f return output, nil } +func findFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: aws.StringSlice([]string{id}), + } + + return findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*fsx.FileSystem]()) +} + func findFileSystemByIDAndType(ctx context.Context, conn *fsx.FSx, fsID, fsType string) (*fsx.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ FileSystemIds: aws.StringSlice([]string{fsID}), diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index cc4e56764fc..db01e5f5467 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -5,6 +5,7 @@ package fsx import ( "context" + "errors" "log" "time" @@ -13,6 +14,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -144,6 +146,7 @@ func ResourceOntapVolume() *schema.Resource { ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), }, }, + CustomizeDiff: verify.SetTagsDiff, } } @@ -187,16 +190,16 @@ func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.TieringPolicy = expandOntapVolumeTieringPolicy(v.([]interface{})) } - result, err := conn.CreateVolumeWithContext(ctx, input) + output, err := conn.CreateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx ONTAP 
Volume (%s): %s", name, err) + return sdkdiag.AppendErrorf(diags, "creating FSx for NetApp ONTAP Volume (%s): %s", name, err) } - d.SetId(aws.StringValue(result.Volume.VolumeId)) + d.SetId(aws.StringValue(output.Volume.VolumeId)) if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) create: %s", d.Id(), err) } return append(diags, resourceOntapVolumeRead(ctx, d, meta)...) @@ -358,3 +361,134 @@ func flattenOntapVolumeTieringPolicy(rs *fsx.TieringPolicy) []interface{} { return []interface{}{m} } + +func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: aws.StringSlice([]string{id}), + } + + return findVolume(ctx, conn, input) +} + +func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) (*fsx.Volume, error) { + output, err := findVolumes(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) ([]*fsx.Volume, error) { + var output []*fsx.Volume + + err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Volumes { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := 
FindVolumeByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Lifecycle), nil + } +} + +func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, + Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecyclePending}, + Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 150 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout 
time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, + Target: []string{}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + } + + return output, err + } + + return nil, err +} diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index f2a453c370c..a1124376149 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -925,52 +924,3 @@ func FindOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (* return output, nil } - -func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - input := &fsx.DescribeVolumesInput{ - VolumeIds: aws.StringSlice([]string{id}), - } - - return findVolume(ctx, conn, input) -} - -func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) (*fsx.Volume, error) { - output, err := findVolumes(ctx, conn, input) - - if err != nil { - return nil, err - } - 
- return tfresource.AssertSinglePtrResult(output) -} - -func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) ([]*fsx.Volume, error) { - var output []*fsx.Volume - - err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Volumes { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} diff --git a/internal/service/fsx/status.go b/internal/service/fsx/status.go index 5ffe34db105..84848f7cb96 100644 --- a/internal/service/fsx/status.go +++ b/internal/service/fsx/status.go @@ -43,22 +43,6 @@ func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) retry.StateR } } -func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindVolumeByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Lifecycle), nil - } -} - func statusSnapshot(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindSnapshotByID(ctx, conn, id) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 86cab76105c..69a98e51658 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -115,72 +115,6 @@ func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - 
Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 150 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, - Target: []string{}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := 
stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - func waitSnapshotCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { stateConf := &retry.StateChangeConf{ Pending: []string{fsx.SnapshotLifecycleCreating, fsx.SnapshotLifecyclePending}, From 0930fbe537b50a3569e1ac5cae9cf01ab7116802 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:22:01 -0400 Subject: [PATCH 16/38] r/aws_fsx_ontap_volume: Tidy up resource Read. --- internal/service/fsx/ontap_volume.go | 49 +++++++++++++++------ internal/service/fsx/ontap_volume_test.go | 4 +- internal/service/fsx/openzfs_file_system.go | 2 +- internal/service/fsx/openzfs_volume.go | 17 ++++++- internal/service/fsx/openzfs_volume_test.go | 4 +- 5 files changed, 57 insertions(+), 19 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index db01e5f5467..5ac0ae4e438 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -209,22 +210,19 @@ func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta i var diags diag.Diagnostics conn := 
meta.(*conns.AWSClient).FSxConn(ctx) - volume, err := FindVolumeByID(ctx, conn, d.Id()) + volume, err := FindONTAPVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx ONTAP Volume (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for NetApp ONTAP Volume (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } ontapConfig := volume.OntapConfiguration - if ontapConfig == nil { - return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Volume (%s): empty ONTAP configuration", d.Id()) - } d.Set("arn", volume.ResourceARN) d.Set("name", volume.Name) @@ -362,16 +360,41 @@ func flattenOntapVolumeTieringPolicy(rs *fsx.TieringPolicy) []interface{} { return []interface{}{m} } -func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { +func FindONTAPVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + output, err := findVolumeByIDAndType(ctx, conn, id, fsx.VolumeTypeOntap) + + if err != nil { + return nil, err + } + + if output.OntapConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} + +func findVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { input := &fsx.DescribeVolumesInput{ VolumeIds: aws.StringSlice([]string{id}), } - return findVolume(ctx, conn, input) + return findVolume(ctx, conn, input, tfslices.PredicateTrue[*fsx.Volume]()) +} + +func findVolumeByIDAndType(ctx context.Context, conn *fsx.FSx, volID, volType string) (*fsx.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: aws.StringSlice([]string{volID}), + } + filter := func(fs *fsx.Volume) bool { + return aws.StringValue(fs.VolumeType) == volType + } + + return 
findVolume(ctx, conn, input, filter) } -func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) (*fsx.Volume, error) { - output, err := findVolumes(ctx, conn, input) +func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) (*fsx.Volume, error) { + output, err := findVolumes(ctx, conn, input, filter) if err != nil { return nil, err @@ -380,7 +403,7 @@ func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesIn return tfresource.AssertSinglePtrResult(output) } -func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) ([]*fsx.Volume, error) { +func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) ([]*fsx.Volume, error) { var output []*fsx.Volume err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { @@ -389,7 +412,7 @@ func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesI } for _, v := range page.Volumes { - if v != nil { + if v != nil && filter(v) { output = append(output, v) } } @@ -413,7 +436,7 @@ func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesI func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindVolumeByID(ctx, conn, id) + output, err := findVolumeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index ad83a24df12..8e5b9517acd 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -481,7 +481,7 @@ func testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - output, err := 
tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -502,7 +502,7 @@ func testAccCheckOntapVolumeDestroy(ctx context.Context) resource.TestCheckFunc continue } - volume, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + volume, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index a1124376149..b2f71919d85 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -502,7 +502,7 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, setTagsOut(ctx, filesystem.Tags) - rootVolume, err := FindVolumeByID(ctx, conn, rootVolumeID) + rootVolume, err := FindOpenZFSVolumeByID(ctx, conn, rootVolumeID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS File System (%s) root volume (%s): %s", d.Id(), rootVolumeID, err) diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index 11691f108f8..db2f64f9a5b 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -272,7 +272,8 @@ func resourceOpenzfsVolumeRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - volume, err := FindVolumeByID(ctx, conn, d.Id()) + volume, err := FindOpenZFSVolumeByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FSx OpenZFS volume (%s) not found, removing from state", d.Id()) d.SetId("") @@ -577,3 +578,17 @@ func flattenOpenzfsVolumeOriginSnapshot(rs *fsx.OpenZFSOriginSnapshotConfigurati return []interface{}{m} } + +func FindOpenZFSVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + output, err := findVolumeByIDAndType(ctx, 
conn, id, fsx.VolumeTypeOpenzfs) + + if err != nil { + return nil, err + } + + if output.OpenZFSConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index 0a4a4006843..62bf627ff1f 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -487,7 +487,7 @@ func testAccCheckOpenzfsVolumeExists(ctx context.Context, resourceName string, v conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - volume1, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + volume1, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { return err } @@ -511,7 +511,7 @@ func testAccCheckOpenzfsVolumeDestroy(ctx context.Context) resource.TestCheckFun continue } - volume, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + volume, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue } From ca74a29623461ee515ebb4a74d348cc672e392b1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:23:49 -0400 Subject: [PATCH 17/38] r/aws_fsx_ontap_volume: Tidy up resource Update and Delete. 
--- internal/service/fsx/ontap_volume.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 5ac0ae4e438..9a5400a3495 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -281,11 +281,11 @@ func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) update: %s", d.Id(), err) } } @@ -296,7 +296,7 @@ func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx ONTAP Volume: %s", d.Id()) + log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ OntapConfiguration: &fsx.DeleteVolumeOntapConfiguration{ SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), @@ -309,11 +309,11 @@ func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) delete: %s", d.Id(), err) + return 
sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) delete: %s", d.Id(), err) } return diags From 95ad7a77f9e4877e3761eb3bd55e1770006c6d5c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:30:57 -0400 Subject: [PATCH 18/38] fsx: 'AdministrativeAction' -> 'FileSystemAdministrativeAction'. --- internal/service/fsx/lustre_file_system.go | 16 ++++++++-------- internal/service/fsx/ontap_file_system.go | 2 +- internal/service/fsx/openzfs_file_system.go | 4 ++-- internal/service/fsx/windows_file_system.go | 8 ++++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index 0a4d214e9ec..a9ddb842a8c 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -541,7 +541,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -761,7 +761,7 @@ func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeou if output, ok := outputRaw.(*fsx.FileSystem); ok { if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + tfresource.SetLastError(err, 
errors.New(aws.StringValue(details.Message))) } return output, err @@ -823,7 +823,7 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou if output, ok := outputRaw.(*fsx.FileSystem); ok { if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) } return output, err @@ -832,7 +832,7 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou return nil, err } -func findAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) (*fsx.AdministrativeAction, error) { +func findFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) (*fsx.AdministrativeAction, error) { output, err := findFileSystemByID(ctx, conn, fsID) if err != nil { @@ -853,9 +853,9 @@ func findAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionTy return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil } -func statusAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) retry.StateRefreshFunc { +func statusFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findAdministrativeAction(ctx, conn, fsID, actionType) + output, err := findFileSystemAdministrativeAction(ctx, conn, fsID, actionType) if tfresource.NotFound(err) { return nil, "", nil @@ -869,11 +869,11 @@ func statusAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, action } } -func waitAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, fsID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam +func 
waitFileSystemAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, fsID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, - Refresh: statusAdministrativeAction(ctx, conn, fsID, actionType), + Refresh: statusFileSystemAdministrativeAction(ctx, conn, fsID, actionType), Timeout: timeout, Delay: 30 * time.Second, } diff --git a/internal/service/fsx/ontap_file_system.go b/internal/service/fsx/ontap_file_system.go index 07adec5097d..e65d87065b7 100644 --- a/internal/service/fsx/ontap_file_system.go +++ b/internal/service/fsx/ontap_file_system.go @@ -412,7 +412,7 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index b2f71919d85..b3337313248 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -582,7 +582,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) update: %s", d.Id(), err) } - if _, err := 
waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } @@ -604,7 +604,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Root Volume (%s) update: %s", rootVolumeID, err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) } } diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index bb6f3189d8c..5151a304ba8 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -488,7 +488,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "associating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, 
d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, err) } } @@ -505,7 +505,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "disassociating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, err) } } @@ -535,7 +535,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -591,7 +591,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", 
d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } From 140d06fa36495e08a303cdc94d89293ebacec23f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:46:11 -0400 Subject: [PATCH 19/38] fsx: Add 'waitVolumeAdministrativeActionCompleted' and friends. --- internal/service/fsx/ontap_volume.go | 99 +++++++++++++++++++-- internal/service/fsx/openzfs_file_system.go | 7 +- internal/service/fsx/openzfs_volume.go | 3 +- 3 files changed, 97 insertions(+), 12 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 9a5400a3495..6e0e66b5db1 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -6,6 +6,7 @@ package fsx import ( "context" "errors" + "fmt" "log" "time" @@ -278,15 +279,20 @@ func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.TieringPolicy = expandOntapVolumeTieringPolicy(d.Get("tiering_policy").([]interface{})) } + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } - if _, err := waitVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitVolumeUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) update: %s", d.Id(), err) } + + if _, 
err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) + } } return append(diags, resourceOntapVolumeRead(ctx, d, meta)...) @@ -462,8 +468,8 @@ func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout ti outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + if status, reason := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) } return output, err @@ -472,7 +478,7 @@ func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout ti return nil, err } -func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam +func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, startTime time.Time, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{fsx.VolumeLifecyclePending}, Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, @@ -484,8 +490,26 @@ func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout ti outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; 
status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + switch status := aws.StringValue(output.Lifecycle); status { + case fsx.VolumeLifecycleFailed: + // Report any failed non-VOLUME_UPDATE administrative actions. + // See https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html#FSx-Type-AdministrativeAction-AdministrativeActionType. + administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v *fsx.AdministrativeAction) bool { + return v != nil && aws.StringValue(v.Status) == fsx.StatusFailed && aws.StringValue(v.AdministrativeActionType) != fsx.AdministrativeActionTypeVolumeUpdate && v.FailureDetails != nil && startTime.Before(aws.TimeValue(v.RequestTime)) + }) + administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v *fsx.AdministrativeAction) error { + return fmt.Errorf("%s: %s", aws.StringValue(v.AdministrativeActionType), aws.StringValue(v.FailureDetails.Message)) + })...) 
+ + if reason := output.LifecycleTransitionReason; reason != nil { + if message := aws.StringValue(reason.Message); administrativeActionsError != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %w", message, administrativeActionsError)) + } else { + tfresource.SetLastError(err, errors.New(message)) + } + } else { + tfresource.SetLastError(err, administrativeActionsError) + } } return output, err @@ -506,8 +530,67 @@ func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout ti outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + if status, reason := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) + } + + return output, err + } + + return nil, err +} + +func findVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) (*fsx.AdministrativeAction, error) { + output, err := findVolumeByID(ctx, conn, volID) + + if err != nil { + return nil, err + } + + for _, v := range output.AdministrativeActions { + if v == nil { + continue + } + + if aws.StringValue(v.AdministrativeActionType) == actionType { + return v, nil + } + } + + // If the administrative action isn't found, assume it's complete. 
+ return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil +} + +func statusVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVolumeAdministrativeAction(ctx, conn, volID, actionType) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + +func waitVolumeAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, volID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, + Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, + Refresh: statusVolumeAdministrativeAction(ctx, conn, volID, actionType), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.AdministrativeAction); ok { + if status, details := aws.StringValue(output.Status), output.FailureDetails; status == fsx.StatusFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) } return output, err diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index b3337313248..915404a5068 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -594,18 +594,19 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData VolumeId: aws.String(rootVolumeID), } + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Root Volume (%s): %s", rootVolumeID, err) } - if _, err := waitVolumeUpdated(ctx, conn, rootVolumeID, 
d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitVolumeUpdated(ctx, conn, rootVolumeID, startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Root Volume (%s) update: %s", rootVolumeID, err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, err) } } } diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index db2f64f9a5b..b8e92353cb8 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -363,13 +363,14 @@ func resourceOpenzfsVolumeUpdate(ctx context.Context, d *schema.ResourceData, me input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) } + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS Volume (%s): %s", d.Id(), err) } - if _, err := waitVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitVolumeUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume (%s) update: %s", d.Id(), err) } } From 767ba0cced22a92f34a159beff8a1543aa94edc3 Mon Sep 17 
00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 15:56:47 -0400 Subject: [PATCH 20/38] r/aws_fsx_ontap_volume: Correct function names. --- internal/service/fsx/ontap_volume.go | 32 ++-- internal/service/fsx/ontap_volume_test.go | 154 ++++++++++---------- internal/service/fsx/service_package_gen.go | 2 +- 3 files changed, 94 insertions(+), 94 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 6e0e66b5db1..d599d201534 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -29,12 +29,12 @@ import ( // @SDKResource("aws_fsx_ontap_volume", name="ONTAP Volume") // @Tags(identifierAttribute="arn") -func ResourceOntapVolume() *schema.Resource { +func ResourceONTAPVolume() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceOntapVolumeCreate, - ReadWithoutTimeout: resourceOntapVolumeRead, - UpdateWithoutTimeout: resourceOntapVolumeUpdate, - DeleteWithoutTimeout: resourceOntapVolumeDelete, + CreateWithoutTimeout: resourceONTAPVolumeCreate, + ReadWithoutTimeout: resourceONTAPVolumeRead, + UpdateWithoutTimeout: resourceONTAPVolumeUpdate, + DeleteWithoutTimeout: resourceONTAPVolumeDelete, Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { @@ -153,7 +153,7 @@ func ResourceOntapVolume() *schema.Resource { } } -func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) @@ -189,7 +189,7 @@ func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("tiering_policy"); ok { - input.OntapConfiguration.TieringPolicy = expandOntapVolumeTieringPolicy(v.([]interface{})) + 
input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})) } output, err := conn.CreateVolumeWithContext(ctx, input) @@ -204,10 +204,10 @@ func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) create: %s", d.Id(), err) } - return append(diags, resourceOntapVolumeRead(ctx, d, meta)...) + return append(diags, resourceONTAPVolumeRead(ctx, d, meta)...) } -func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) @@ -235,7 +235,7 @@ func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("snapshot_policy", ontapConfig.SnapshotPolicy) d.Set("storage_efficiency_enabled", ontapConfig.StorageEfficiencyEnabled) d.Set("storage_virtual_machine_id", ontapConfig.StorageVirtualMachineId) - if err := d.Set("tiering_policy", flattenOntapVolumeTieringPolicy(ontapConfig.TieringPolicy)); err != nil { + if err := d.Set("tiering_policy", flattenTieringPolicy(ontapConfig.TieringPolicy)); err != nil { return sdkdiag.AppendErrorf(diags, "setting tiering_policy: %s", err) } d.Set("uuid", ontapConfig.UUID) @@ -244,7 +244,7 @@ func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta i return diags } -func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) @@ -276,7 +276,7 @@ func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("tiering_policy") { - input.OntapConfiguration.TieringPolicy = 
expandOntapVolumeTieringPolicy(d.Get("tiering_policy").([]interface{})) + input.OntapConfiguration.TieringPolicy = expandTieringPolicy(d.Get("tiering_policy").([]interface{})) } startTime := time.Now() @@ -295,10 +295,10 @@ func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } } - return append(diags, resourceOntapVolumeRead(ctx, d, meta)...) + return append(diags, resourceONTAPVolumeRead(ctx, d, meta)...) } -func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) @@ -325,7 +325,7 @@ func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandOntapVolumeTieringPolicy(cfg []interface{}) *fsx.TieringPolicy { +func expandTieringPolicy(cfg []interface{}) *fsx.TieringPolicy { if len(cfg) < 1 { return nil } @@ -347,7 +347,7 @@ func expandOntapVolumeTieringPolicy(cfg []interface{}) *fsx.TieringPolicy { return &out } -func flattenOntapVolumeTieringPolicy(rs *fsx.TieringPolicy) []interface{} { +func flattenTieringPolicy(rs *fsx.TieringPolicy) []interface{} { if rs == nil { return []interface{}{} } diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 8e5b9517acd..6ffcbeb444d 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccFSxOntapVolume_basic(t *testing.T) { +func TestAccFSxONTAPVolume_basic(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -30,12 +30,12 @@ func TestAccFSxOntapVolume_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), resource.TestCheckResourceAttrSet(resourceName, "file_system_id"), resource.TestCheckResourceAttr(resourceName, "junction_path", fmt.Sprintf("/%[1]s", rName)), @@ -62,7 +62,7 @@ func TestAccFSxOntapVolume_basic(t *testing.T) { }) } -func TestAccFSxOntapVolume_disappears(t *testing.T) { +func TestAccFSxONTAPVolume_disappears(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -72,13 +72,13 @@ func TestAccFSxOntapVolume_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOntapVolume(), resourceName), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceONTAPVolume(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -86,7 +86,7 @@ func TestAccFSxOntapVolume_disappears(t *testing.T) { }) } -func 
TestAccFSxOntapVolume_name(t *testing.T) { +func TestAccFSxONTAPVolume_name(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -97,12 +97,12 @@ func TestAccFSxOntapVolume_name(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, @@ -115,8 +115,8 @@ func TestAccFSxOntapVolume_name(t *testing.T) { { Config: testAccONTAPVolumeConfig_basic(rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName2), ), }, @@ -124,7 +124,7 @@ func TestAccFSxOntapVolume_name(t *testing.T) { }) } -func TestAccFSxOntapVolume_junctionPath(t *testing.T) { +func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -136,12 +136,12 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + 
CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath1), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "junction_path", jPath1), ), @@ -155,8 +155,8 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { { Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "junction_path", jPath2), ), @@ -165,7 +165,7 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { }) } -func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { +func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -175,12 +175,12 @@ func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_ontapVolumeTypeDP(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), resource.TestCheckResourceAttr(resourceName, "name", 
rName), resource.TestCheckResourceAttr(resourceName, "ontap_volume_type", "DP"), ), @@ -195,7 +195,7 @@ func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { }) } -func TestAccFSxOntapVolume_securityStyle(t *testing.T) { +func TestAccFSxONTAPVolume_securityStyle(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -205,12 +205,12 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_securityStyle(rName, "UNIX"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "UNIX"), ), @@ -224,8 +224,8 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { { Config: testAccONTAPVolumeConfig_securityStyle(rName, "NTFS"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "NTFS"), ), @@ -233,8 +233,8 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { { Config: testAccONTAPVolumeConfig_securityStyle(rName, "MIXED"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), - 
testAccCheckOntapVolumeNotRecreated(&volume1, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume3), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "MIXED"), ), @@ -243,7 +243,7 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { }) } -func TestAccFSxOntapVolume_size(t *testing.T) { +func TestAccFSxONTAPVolume_size(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -255,12 +255,12 @@ func TestAccFSxOntapVolume_size(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_size(rName, size1), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", fmt.Sprint(size1)), ), @@ -274,8 +274,8 @@ func TestAccFSxOntapVolume_size(t *testing.T) { { Config: testAccONTAPVolumeConfig_size(rName, size2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", fmt.Sprint(size2)), ), @@ -284,7 +284,7 @@ func TestAccFSxOntapVolume_size(t 
*testing.T) { }) } -func TestAccFSxOntapVolume_snapshotPolicy(t *testing.T) { +func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -296,12 +296,12 @@ func TestAccFSxOntapVolume_snapshotPolicy(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy1), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy1)), ), @@ -315,8 +315,8 @@ func TestAccFSxOntapVolume_snapshotPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy2)), ), @@ -325,7 +325,7 @@ func TestAccFSxOntapVolume_snapshotPolicy(t *testing.T) { }) } -func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { +func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -335,12 +335,12 @@ func 
TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), ), @@ -354,8 +354,8 @@ func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "false"), ), @@ -364,7 +364,7 @@ func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { }) } -func TestAccFSxOntapVolume_tags(t *testing.T) { +func TestAccFSxONTAPVolume_tags(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -374,12 +374,12 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + 
CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -393,8 +393,8 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { { Config: testAccONTAPVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -403,8 +403,8 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { { Config: testAccONTAPVolumeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), - testAccCheckOntapVolumeNotRecreated(&volume2, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume2, &volume3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -413,7 +413,7 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { }) } -func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { +func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3, volume4 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -423,12 +423,12 @@ func 
TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, "NONE"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "NONE"), ), @@ -442,8 +442,8 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "SNAPSHOT_ONLY", 10), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "SNAPSHOT_ONLY"), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.cooling_period", "10"), @@ -452,8 +452,8 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "AUTO", 60), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume3), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, 
"tiering_policy.0.name", "AUTO"), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.cooling_period", "60"), @@ -462,8 +462,8 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, "ALL"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume4), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume4), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume4), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume4), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "ALL"), ), @@ -472,7 +472,7 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { }) } -func testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -493,7 +493,7 @@ func testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) } } -func testAccCheckOntapVolumeDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckONTAPVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) @@ -509,34 +509,34 @@ func testAccCheckOntapVolumeDestroy(ctx context.Context) resource.TestCheckFunc } if volume != nil { - return fmt.Errorf("FSx ONTAP Volume (%s) still exists", rs.Primary.ID) + return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) still exists", rs.Primary.ID) } } return nil } } -func testAccCheckOntapVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) != 
aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx ONTAP Volume (%s) recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccCheckOntapVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx ONTAP Volume (%s) not recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) not recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccOntapVolumeConfig_base(rName string) string { +func testAccONTAPVolumeConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` resource "aws_fsx_ontap_file_system" "test" { storage_capacity = 1024 @@ -558,7 +558,7 @@ resource "aws_fsx_ontap_storage_virtual_machine" "test" { } func testAccONTAPVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -570,7 +570,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_junctionPath(rName string, junctionPath string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = %[2]q @@ -582,7 +582,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_ontapVolumeTypeDP(rName string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q ontap_volume_type = "DP" @@ -594,7 +594,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_securityStyle(rName string, securityStyle string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -607,7 +607,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_size(rName string, size int) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -619,7 +619,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_snapshotPolicy(rName string, snapshotPolicy string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -632,7 +632,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_storageEfficiency(rName string, storageEfficiencyEnabled bool) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -644,7 +644,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tieringPolicy(rName string, policy string, coolingPeriod int) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -661,7 +661,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName string, policy string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -677,7 +677,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -693,7 +693,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index bd51a355f99..78f66ed5d79 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -102,7 +102,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceOntapVolume, + Factory: ResourceONTAPVolume, TypeName: "aws_fsx_ontap_volume", Name: "ONTAP Volume", Tags: &types.ServicePackageResourceTags{ From cb8c7089980424666ef946a220ddcdeb205ddadb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Sep 2023 16:13:15 -0400 Subject: [PATCH 
21/38] r/aws_fsx_ontap_volume: Add 'copy_tags_to_backups' argument. --- .changelog/32530.txt | 2 +- internal/service/fsx/ontap_volume.go | 18 ++++- internal/service/fsx/ontap_volume_test.go | 77 +++++++++++++++---- website/docs/r/fsx_ontap_volume.html.markdown | 1 + 4 files changed, 82 insertions(+), 16 deletions(-) diff --git a/.changelog/32530.txt b/.changelog/32530.txt index 485b3c07a7d..db6d3e812f9 100644 --- a/.changelog/32530.txt +++ b/.changelog/32530.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_fsx_ontap_volume: Add `snapshot_policy` argument +resource/aws_fsx_ontap_volume: Add `copy_tags_to_backups` and `snapshot_policy` arguments ``` diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index d599d201534..1062488d1ea 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -55,6 +55,11 @@ func ResourceONTAPVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "file_system_id": { Type: schema.TypeString, Computed: true, @@ -168,6 +173,10 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta VolumeType: aws.String(d.Get("volume_type").(string)), } + if v, ok := d.GetOk("copy_tags_to_backups"); ok { + input.OntapConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("junction_path"); ok { input.OntapConfiguration.JunctionPath = aws.String(v.(string)) } @@ -226,9 +235,10 @@ func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta i ontapConfig := volume.OntapConfiguration d.Set("arn", volume.ResourceARN) - d.Set("name", volume.Name) + d.Set("copy_tags_to_backups", ontapConfig.CopyTagsToBackups) d.Set("file_system_id", volume.FileSystemId) d.Set("junction_path", ontapConfig.JunctionPath) + d.Set("name", volume.Name) d.Set("ontap_volume_type", ontapConfig.OntapVolumeType) 
d.Set("security_style", ontapConfig.SecurityStyle) d.Set("size_in_megabytes", ontapConfig.SizeInMegabytes) @@ -248,13 +258,17 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { + if d.HasChangesExcept("tags", "tags_all") { input := &fsx.UpdateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), OntapConfiguration: &fsx.UpdateOntapVolumeConfiguration{}, VolumeId: aws.String(d.Id()), } + if d.HasChange("copy_tags_to_backups") { + input.OntapConfiguration.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) + } + if d.HasChange("junction_path") { input.OntapConfiguration.JunctionPath = aws.String(d.Get("junction_path").(string)) } diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 6ffcbeb444d..8969cc83877 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -34,9 +34,10 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), resource.TestCheckResourceAttrSet(resourceName, "file_system_id"), resource.TestCheckResourceAttr(resourceName, "junction_path", fmt.Sprintf("/%[1]s", rName)), resource.TestCheckResourceAttr(resourceName, "ontap_volume_type", "RW"), @@ -86,12 +87,11 @@ func TestAccFSxONTAPVolume_disappears(t *testing.T) { }) } -func TestAccFSxONTAPVolume_name(t *testing.T) { +func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 
fsx.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) - rName2 := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -100,10 +100,10 @@ func TestAccFSxONTAPVolume_name(t *testing.T) { CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccONTAPVolumeConfig_basic(rName), + Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), - resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), ), }, { @@ -113,11 +113,11 @@ func TestAccFSxONTAPVolume_name(t *testing.T) { ImportStateVerifyIgnore: []string{"skip_final_backup"}, }, { - Config: testAccONTAPVolumeConfig_basic(rName2), + Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), - testAccCheckONTAPVolumeRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "name", rName2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), ), }, }, @@ -165,6 +165,44 @@ func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { }) } +func TestAccFSxONTAPVolume_name(t *testing.T) { + ctx := acctest.Context(t) + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + rName2 := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: 
acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"skip_final_backup"}, + }, + { + Config: testAccONTAPVolumeConfig_basic(rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume @@ -569,7 +607,20 @@ resource "aws_fsx_ontap_volume" "test" { `, rName)) } -func testAccONTAPVolumeConfig_junctionPath(rName string, junctionPath string) string { +func testAccONTAPVolumeConfig_copyTagsToBackups(rName string, copyTagsToBackups bool) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + copy_tags_to_backups = %[2]t +} +`, rName, copyTagsToBackups)) +} + +func testAccONTAPVolumeConfig_junctionPath(rName, junctionPath string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q @@ -593,7 +644,7 @@ resource "aws_fsx_ontap_volume" "test" { `, rName)) } -func testAccONTAPVolumeConfig_securityStyle(rName string, securityStyle string) string { +func 
testAccONTAPVolumeConfig_securityStyle(rName, securityStyle string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q @@ -618,7 +669,7 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, size)) } -func testAccONTAPVolumeConfig_snapshotPolicy(rName string, snapshotPolicy string) string { +func testAccONTAPVolumeConfig_snapshotPolicy(rName, snapshotPolicy string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q @@ -643,7 +694,7 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, storageEfficiencyEnabled)) } -func testAccONTAPVolumeConfig_tieringPolicy(rName string, policy string, coolingPeriod int) string { +func testAccONTAPVolumeConfig_tieringPolicy(rName, policy string, coolingPeriod int) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q @@ -660,7 +711,7 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, policy, coolingPeriod)) } -func testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName string, policy string) string { +func testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, policy string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 2c43df10e13..9652df875b9 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -49,6 +49,7 @@ resource "aws_fsx_ontap_volume" "test" { This resource supports the following arguments: * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. 
+* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. * `junction_path` - (Optional) Specifies the location in the storage virtual machine's namespace where the volume is mounted. The junction_path must have a leading forward slash, such as `/vol3` * `ontap_volume_type` - (Optional) Specifies the type of volume, valid values are `RW`, `DP`. Default value is `RW`. These can be set by the ONTAP CLI or API. This setting is used as part of migration and replication [Migrating to Amazon FSx for NetApp ONTAP](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/migrating-fsx-ontap.html) * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. From fd8075f3c52a2ab0f1e7ee8f1bd872929e7d97aa Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 07:27:29 -0400 Subject: [PATCH 22/38] r/aws_fsx_ontap_volume: Make 'snapshot_policy' Computed, preventing errors like 'BadRequest: Invalid fields provided for a DP volume. JunctionPath, StorageEfficiency, SnapshotPolicy and SecurityStyle cannot be specified for a DP Volume'. 
--- internal/service/fsx/ontap_volume.go | 2 +- internal/service/fsx/ontap_volume_test.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 1062488d1ea..f3633256e07 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -105,7 +105,7 @@ func ResourceONTAPVolume() *schema.Resource { "snapshot_policy": { Type: schema.TypeString, Optional: true, - Default: "default", + Computed: true, ValidateFunc: validation.StringLenBetween(1, 255), }, "storage_efficiency_enabled": { diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 8969cc83877..b02ccd8e8ae 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -219,7 +219,6 @@ func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { Config: testAccONTAPVolumeConfig_ontapVolumeTypeDP(rName), Check: resource.ComposeTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), - resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "ontap_volume_type", "DP"), ), }, From b93f168c8799e30e0d87e4c9656885f5f6f334d6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 07:27:57 -0400 Subject: [PATCH 23/38] Acceptance test output: % ACCTEST_TIMEOUT=720m make testacc TESTARGS='-run=TestAccFSxONTAPVolume_basic\|TestAccFSxONTAPVolume_ontapVolumeType\|TestAccFSxONTAPVolume_snapshotPolicy' PKG=fsx ACCTEST_PARALLELISM=3 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/fsx/... 
-v -count 1 -parallel 3 -run=TestAccFSxONTAPVolume_basic\|TestAccFSxONTAPVolume_ontapVolumeType\|TestAccFSxONTAPVolume_snapshotPolicy -timeout 720m === RUN TestAccFSxONTAPVolume_basic === PAUSE TestAccFSxONTAPVolume_basic === RUN TestAccFSxONTAPVolume_ontapVolumeType === PAUSE TestAccFSxONTAPVolume_ontapVolumeType === RUN TestAccFSxONTAPVolume_snapshotPolicy === PAUSE TestAccFSxONTAPVolume_snapshotPolicy === CONT TestAccFSxONTAPVolume_basic === CONT TestAccFSxONTAPVolume_snapshotPolicy === CONT TestAccFSxONTAPVolume_ontapVolumeType --- PASS: TestAccFSxONTAPVolume_ontapVolumeType (2320.79s) --- PASS: TestAccFSxONTAPVolume_snapshotPolicy (2623.55s) --- PASS: TestAccFSxONTAPVolume_basic (2626.48s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/fsx 2631.567s From 57d58923b1fd3a26d57a51a042bc4ea144a56b89 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 14:51:53 -0400 Subject: [PATCH 24/38] r/aws_fsx_openzfs_volume: Correct function names. --- internal/service/fsx/ontap_volume_test.go | 8 +- internal/service/fsx/openzfs_file_system.go | 33 --- internal/service/fsx/openzfs_volume.go | 150 +++++++------- internal/service/fsx/openzfs_volume_test.go | 213 +++++++++----------- internal/service/fsx/service_package_gen.go | 2 +- 5 files changed, 165 insertions(+), 241 deletions(-) diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index b02ccd8e8ae..7609d2897a9 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -539,15 +539,17 @@ func testAccCheckONTAPVolumeDestroy(ctx context.Context) resource.TestCheckFunc continue } - volume, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue } - if volume != nil { - return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) still exists", rs.Primary.ID) + if err != nil { + return err } + + return 
fmt.Errorf("FSx for NetApp ONTAP Volume %s still exists", rs.Primary.ID) } return nil } diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 915404a5068..f2c28c95a1c 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -797,20 +797,6 @@ func expandOpenZFSClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientCo return configurations } -func expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { - out := fsx.OpenZFSClientConfiguration{} - - if v, ok := conf["clients"].(string); ok && len(v) > 0 { - out.Clients = aws.String(v) - } - - if v, ok := conf["options"].([]interface{}); ok { - out.Options = flex.ExpandStringList(v) - } - - return &out -} - func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} @@ -873,25 +859,6 @@ func flattenOpenZFSFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]inter return nil } -func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { - configurations := make([]map[string]interface{}, 0) - - for _, configuration := range rs { - if configuration != nil { - cfg := make(map[string]interface{}) - cfg["clients"] = aws.StringValue(configuration.Clients) - cfg["options"] = flex.FlattenStringList(configuration.Options) - configurations = append(configurations, cfg) - } - } - - if len(configurations) > 0 { - return configurations - } - - return nil -} - func flattenOpenZFSFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { quotas := make([]map[string]interface{}, 0) diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index b8e92353cb8..f798c7ec10d 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -27,12 +27,13 @@ import ( // 
@SDKResource("aws_fsx_openzfs_volume", name="OpenZFS Volume") // @Tags(identifierAttribute="arn") -func ResourceOpenzfsVolume() *schema.Resource { +func ResourceOpenZFSVolume() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceOpenzfsVolumeCreate, - ReadWithoutTimeout: resourceOpenzfsVolumeRead, - UpdateWithoutTimeout: resourceOpenzfsVolumeUpdate, - DeleteWithoutTimeout: resourceOpenzfsVolumeDelete, + CreateWithoutTimeout: resourceOpenZFSVolumeCreate, + ReadWithoutTimeout: resourceOpenZFSVolumeRead, + UpdateWithoutTimeout: resourceOpenZFSVolumeUpdate, + DeleteWithoutTimeout: resourceOpenZFSVolumeDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -185,6 +186,7 @@ func ResourceOpenzfsVolume() *schema.Resource { Type: schema.TypeString, Default: fsx.VolumeTypeOpenzfs, Optional: true, + ForceNew: true, ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), }, }, @@ -193,18 +195,19 @@ func ResourceOpenzfsVolume() *schema.Resource { } } -func resourceOpenzfsVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) + name := d.Get("name").(string) input := &fsx.CreateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), - Name: aws.String(d.Get("name").(string)), - VolumeType: aws.String(d.Get("volume_type").(string)), + Name: aws.String(name), OpenZFSConfiguration: &fsx.CreateOpenZFSVolumeConfiguration{ ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), }, - Tags: getTagsIn(ctx), + Tags: getTagsIn(ctx), + VolumeType: aws.String(d.Get("volume_type").(string)), } if v, ok := d.GetOk("copy_tags_to_snapshots"); ok { @@ -216,7 +219,11 @@ func resourceOpenzfsVolumeCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := 
d.GetOk("nfs_exports"); ok { - input.OpenZFSConfiguration.NfsExports = expandOpenzfsVolumeNFSExports(v.([]interface{})) + input.OpenZFSConfiguration.NfsExports = expandOpenZFSNfsExports(v.([]interface{})) + } + + if v, ok := d.GetOk("origin_snapshot"); ok { + input.OpenZFSConfiguration.OriginSnapshot = expandCreateOpenZFSOriginSnapshotConfiguration(v.([]interface{})) } if v, ok := d.GetOk("read_only"); ok { @@ -236,99 +243,74 @@ func resourceOpenzfsVolumeCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("user_and_group_quotas"); ok { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenzfsVolumeUserAndGroupQuotas(v.(*schema.Set).List()) + input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } - if v, ok := d.GetOk("origin_snapshot"); ok { - input.OpenZFSConfiguration.OriginSnapshot = expandOpenzfsCreateVolumeOriginSnapshot(v.([]interface{})) - - log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input) - result, err := conn.CreateVolumeWithContext(ctx, input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS Volume from snapshot: %s", err) - } - - d.SetId(aws.StringValue(result.Volume.VolumeId)) - } else { - log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input) - result, err := conn.CreateVolumeWithContext(ctx, input) + output, err := conn.CreateVolumeWithContext(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS Volume: %s", err) - } - - d.SetId(aws.StringValue(result.Volume.VolumeId)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS Volume (%s): %s", name, err) } + d.SetId(aws.StringValue(output.Volume.VolumeId)) + if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume(%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS 
Volume (%s) create: %s", d.Id(), err) } - return append(diags, resourceOpenzfsVolumeRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSVolumeRead(ctx, d, meta)...) } -func resourceOpenzfsVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) volume, err := FindOpenZFSVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx OpenZFS volume (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for OpenZFS Volume (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS Volume (%s): %s", d.Id(), err) } openzfsConfig := volume.OpenZFSConfiguration - if volume.OntapConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx OpeZFS Volume, found FSx ONTAP Volume: %s", d.Id()) - } - - if openzfsConfig == nil { - return sdkdiag.AppendErrorf(diags, "describing FSx OpenZFS Volume (%s): empty Openzfs configuration", d.Id()) - } - d.Set("arn", volume.ResourceARN) d.Set("copy_tags_to_snapshots", openzfsConfig.CopyTagsToSnapshots) d.Set("data_compression_type", openzfsConfig.DataCompressionType) d.Set("name", volume.Name) + if err := d.Set("nfs_exports", flattenOpenZFSNfsExports(openzfsConfig.NfsExports)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) + } + if err := d.Set("origin_snapshot", flattenOpenZFSOriginSnapshotConfiguration(openzfsConfig.OriginSnapshot)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) + } d.Set("parent_volume_id", openzfsConfig.ParentVolumeId) d.Set("read_only", openzfsConfig.ReadOnly) 
d.Set("record_size_kib", openzfsConfig.RecordSizeKiB) d.Set("storage_capacity_quota_gib", openzfsConfig.StorageCapacityQuotaGiB) d.Set("storage_capacity_reservation_gib", openzfsConfig.StorageCapacityReservationGiB) - d.Set("volume_type", volume.VolumeType) - - if err := d.Set("origin_snapshot", flattenOpenzfsVolumeOriginSnapshot(openzfsConfig.OriginSnapshot)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) - } - - if err := d.Set("nfs_exports", flattenOpenzfsVolumeNFSExports(openzfsConfig.NfsExports)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) - } - - if err := d.Set("user_and_group_quotas", flattenOpenzfsVolumeUserAndGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { + if err := d.Set("user_and_group_quotas", flattenOpenZFSUserOrGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_and_group_quotas: %s", err) } + d.Set("volume_type", volume.VolumeType) return diags } -func resourceOpenzfsVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { + if d.HasChangesExcept("tags", "tags_all") { input := &fsx.UpdateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), - VolumeId: aws.String(d.Id()), OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{}, + VolumeId: aws.String(d.Id()), } if d.HasChange("data_compression_type") { @@ -340,7 +322,7 @@ func resourceOpenzfsVolumeUpdate(ctx context.Context, d *schema.ResourceData, me } if d.HasChange("nfs_exports") { - input.OpenZFSConfiguration.NfsExports = expandOpenzfsVolumeNFSExports(d.Get("nfs_exports").([]interface{})) + input.OpenZFSConfiguration.NfsExports = 
expandOpenZFSNfsExports(d.Get("nfs_exports").([]interface{})) } if d.HasChange("read_only") { @@ -360,29 +342,33 @@ func resourceOpenzfsVolumeUpdate(ctx context.Context, d *schema.ResourceData, me } if d.HasChange("user_and_group_quotas") { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) + input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) } startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) update: %s", d.Id(), err) + } + + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) } } - return append(diags, resourceOpenzfsVolumeRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSVolumeRead(ctx, d, meta)...) 
} -func resourceOpenzfsVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx OpenZFS Volume: %s", d.Id()) + log.Printf("[DEBUG] Deleting FSx for OpenZFS Volume: %s", d.Id()) _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ VolumeId: aws.String(d.Id()), }) @@ -392,21 +378,21 @@ func resourceOpenzfsVolumeDelete(ctx context.Context, d *schema.ResourceData, me } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for OpenZFS Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume (%s) delete: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) delete: %s", d.Id(), err) } return diags } -func expandOpenzfsVolumeUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserOrGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { quotas := []*fsx.OpenZFSUserOrGroupQuota{} for _, quota := range cfg { - expandedQuota := expandOpenzfsVolumeUserAndGroupQuota(quota.(map[string]interface{})) + expandedQuota := expandOpenZFSUserOrGroupQuota(quota.(map[string]interface{})) if expandedQuota != nil { quotas = append(quotas, expandedQuota) } @@ -415,7 +401,7 @@ func expandOpenzfsVolumeUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUser return quotas } -func expandOpenzfsVolumeUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { if len(conf) < 1 { return 
nil } @@ -437,11 +423,11 @@ func expandOpenzfsVolumeUserAndGroupQuota(conf map[string]interface{}) *fsx.Open return &out } -func expandOpenzfsVolumeNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { exports := []*fsx.OpenZFSNfsExport{} for _, export := range cfg { - expandedExport := expandOpenzfsVolumeNFSExport(export.(map[string]interface{})) + expandedExport := expandOpenZFSNfsExport(export.(map[string]interface{})) if expandedExport != nil { exports = append(exports, expandedExport) } @@ -450,21 +436,21 @@ func expandOpenzfsVolumeNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { return exports } -func expandOpenzfsVolumeNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { out := fsx.OpenZFSNfsExport{} if v, ok := cfg["client_configurations"]; ok { - out.ClientConfigurations = expandOpenzfsVolumeClinetConfigurations(v.(*schema.Set).List()) + out.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List()) } return &out } -func expandOpenzfsVolumeClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { +func expandOpenZFSClientConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { configurations := []*fsx.OpenZFSClientConfiguration{} for _, configuration := range cfg { - expandedConfiguration := expandOpenzfsVolumeClientConfiguration(configuration.(map[string]interface{})) + expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { configurations = append(configurations, expandedConfiguration) } @@ -473,7 +459,7 @@ func expandOpenzfsVolumeClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSCl return configurations } -func expandOpenzfsVolumeClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { +func 
expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { out := fsx.OpenZFSClientConfiguration{} if v, ok := conf["clients"].(string); ok && len(v) > 0 { @@ -487,7 +473,7 @@ func expandOpenzfsVolumeClientConfiguration(conf map[string]interface{}) *fsx.Op return &out } -func expandOpenzfsCreateVolumeOriginSnapshot(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { +func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { if len(cfg) < 1 { return nil } @@ -507,13 +493,13 @@ func expandOpenzfsCreateVolumeOriginSnapshot(cfg []interface{}) *fsx.CreateOpenZ return &out } -func flattenOpenzfsVolumeNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { +func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { exports := make([]map[string]interface{}, 0) for _, export := range rs { if export != nil { cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenzfsVolumeClientConfigurations(export.ClientConfigurations) + cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) exports = append(exports, cfg) } } @@ -525,7 +511,7 @@ func flattenOpenzfsVolumeNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]int return nil } -func flattenOpenzfsVolumeClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { +func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { configurations := make([]map[string]interface{}, 0) for _, configuration := range rs { @@ -544,7 +530,7 @@ func flattenOpenzfsVolumeClientConfigurations(rs []*fsx.OpenZFSClientConfigurati return nil } -func flattenOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { +func flattenOpenZFSUserOrGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { quotas := 
make([]map[string]interface{}, 0) for _, quota := range rs { @@ -564,7 +550,7 @@ func flattenOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) [ return nil } -func flattenOpenzfsVolumeOriginSnapshot(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { +func flattenOpenZFSOriginSnapshotConfiguration(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { if rs == nil { return []interface{}{} } diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index 62bf627ff1f..4f534613e39 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccFSxOpenzfsVolume_basic(t *testing.T) { +func TestAccFSxOpenZFSVolume_basic(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -30,12 +30,12 @@ func TestAccFSxOpenzfsVolume_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), @@ -60,10 +60,9 @@ func TestAccFSxOpenzfsVolume_basic(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_parentVolume(t 
*testing.T) { +func TestAccFSxOpenZFSVolume_parentVolume(t *testing.T) { ctx := acctest.Context(t) var volume, volume2 fsx.Volume - var volumeId string resourceName := "aws_fsx_openzfs_volume.test" resourceName2 := "aws_fsx_openzfs_volume.test2" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -73,17 +72,16 @@ func TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_parent(rName, rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume), - testAccCheckOpenzfsVolumeExists(ctx, resourceName2, &volume2), - testAccCheckOpenzfsVolumeGetID(resourceName, &volumeId), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), + testAccCheckOpenZFSVolumeExists(ctx, resourceName2, &volume2), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), acctest.MatchResourceAttrRegionalARN(resourceName2, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), - resource.TestCheckResourceAttrPtr(resourceName2, "parent_volume_id", &volumeId), + resource.TestCheckResourceAttrPair(resourceName2, "parent_volume_id", resourceName, "id"), ), }, { @@ -95,7 +93,7 @@ func TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_tags(t *testing.T) { +func TestAccFSxOpenZFSVolume_tags(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -105,12 +103,12 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -123,8 +121,8 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -133,8 +131,8 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume3), - testAccCheckOpenzfsVolumeNotRecreated(&volume2, &volume3), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume3), + testAccCheckOpenZFSVolumeNotRecreated(&volume2, &volume3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -143,7 +141,7 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { }) } -func 
TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { +func TestAccFSxOpenZFSVolume_copyTags(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -153,12 +151,12 @@ func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "true"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), @@ -172,8 +170,8 @@ func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "false"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), ), @@ -182,7 +180,7 @@ func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_name(t *testing.T) { +func TestAccFSxOpenZFSVolume_name(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -193,12 +191,12 @@ func 
TestAccFSxOpenzfsVolume_name(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, @@ -210,8 +208,8 @@ func TestAccFSxOpenzfsVolume_name(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_basic(rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName2), ), }, @@ -219,7 +217,7 @@ func TestAccFSxOpenzfsVolume_name(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { +func TestAccFSxOpenZFSVolume_dataCompressionType(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -229,12 +227,12 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_dataCompression(rName, "ZSTD"), Check: 
resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "ZSTD"), ), }, @@ -246,8 +244,8 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_dataCompression(rName, "NONE"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), ), }, @@ -255,7 +253,7 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { +func TestAccFSxOpenZFSVolume_readOnly(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -265,12 +263,12 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_readOnly(rName, "false"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "read_only", "false"), ), }, @@ -282,8 +280,8 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_readOnly(rName, "true"), Check: resource.ComposeTestCheckFunc( - 
testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "read_only", "true"), ), }, @@ -291,7 +289,7 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { +func TestAccFSxOpenZFSVolume_recordSizeKib(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -301,12 +299,12 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_recordSizeKib(rName, 8), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "record_size_kib", "8"), ), }, @@ -318,8 +316,8 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_recordSizeKib(rName, 1024), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "record_size_kib", "1024"), ), }, @@ -327,7 +325,7 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { }) } -func 
TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { +func TestAccFSxOpenZFSVolume_storageCapacity(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -337,12 +335,12 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_storageCapacity(rName, 30, 20), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "30"), resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "20"), ), @@ -355,8 +353,8 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_storageCapacity(rName, 40, 30), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "40"), resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "30"), ), @@ -365,7 +363,7 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { +func TestAccFSxOpenZFSVolume_nfsExports(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -375,12 
+373,12 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_nfsExports1(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.clients", "10.0.1.0/24"), @@ -397,8 +395,8 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_nfsExports2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "nfs_exports.0.client_configurations.*", map[string]string{ @@ -417,7 +415,7 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { +func TestAccFSxOpenZFSVolume_userAndGroupQuotas(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -427,12 +425,12 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t 
*testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_userAndGroupQuotas1(rName, 256), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ "id": "10", @@ -449,8 +447,8 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_userAndGroupQuotas2(rName, 128, 1024), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "4"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ "id": "10", @@ -478,31 +476,28 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { }) } -func testAccCheckOpenzfsVolumeExists(ctx context.Context, resourceName string, volume *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := 
acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - volume1, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if volume == nil { - return fmt.Errorf("FSx OpenZFS Volume (%s) not found", rs.Primary.ID) - } - - *volume = *volume1 + *v = *output return nil } } -func testAccCheckOpenzfsVolumeDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) @@ -511,85 +506,59 @@ func testAccCheckOpenzfsVolumeDestroy(ctx context.Context) resource.TestCheckFun continue } - volume, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } - if volume != nil { - return fmt.Errorf("FSx OpenZFS Volume (%s) still exists", rs.Primary.ID) + if err != nil { + return err } - } - return nil - } -} -func testAccCheckOpenzfsVolumeGetID(resourceName string, volumeId *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("FSx for OpenZFS Volume %s still exists", rs.Primary.ID) } - - *volumeId = rs.Primary.ID - return nil } } -func testAccCheckOpenzfsVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccCheckOpenzfsVolumeRecreated(i, j 
*fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccOpenzfsVolumeBaseConfig(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_partition" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = %[1]q - } -} - +func testAccOpenZFSVolumeConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 + + tags = { + Name = %[1]q + } } `, rName)) } func testAccOpenZFSVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -598,7 +567,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_parent(rName, rName2 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = 
%[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -612,7 +581,7 @@ resource "aws_fsx_openzfs_volume" "test2" { } func testAccOpenZFSVolumeConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -625,7 +594,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -640,7 +609,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_copyTags(rName, tagKey1, tagValue1, copyTags string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -654,7 +623,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_dataCompression(rName, dType string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -664,7 +633,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_readOnly(rName, readOnly string) string { - 
return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -674,7 +643,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_recordSizeKib(rName string, recordSizeKib int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -684,7 +653,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_storageCapacity(rName string, storageQuota, storageReservation int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -695,7 +664,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_nfsExports1(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -711,7 +680,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_nfsExports2(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id 
= aws_fsx_openzfs_file_system.test.root_volume_id @@ -730,7 +699,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_userAndGroupQuotas1(rName string, quotaSize int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -744,7 +713,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_userAndGroupQuotas2(rName string, userQuota, groupQuota int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index 78f66ed5d79..a3a7ca036b9 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -126,7 +126,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceOpenzfsVolume, + Factory: ResourceOpenZFSVolume, TypeName: "aws_fsx_openzfs_volume", Name: "OpenZFS Volume", Tags: &types.ServicePackageResourceTags{ From de6588658dafbf328e7540a9283db455bb5d212d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 14:54:37 -0400 Subject: [PATCH 25/38] Add 'TestAccFSxOpenZFSVolume_disappears'. 
--- internal/service/fsx/openzfs_volume_test.go | 26 ++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index 4f534613e39..c7d882a9429 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -34,7 +34,7 @@ func TestAccFSxOpenZFSVolume_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), @@ -60,6 +60,30 @@ func TestAccFSxOpenZFSVolume_basic(t *testing.T) { }) } +func TestAccFSxOpenZFSVolume_disappears(t *testing.T) { + ctx := acctest.Context(t) + var volume fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOpenZFSVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOpenZFSVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccFSxOpenZFSVolume_parentVolume(t *testing.T) { ctx := acctest.Context(t) var volume, volume2 fsx.Volume From 4bec1b68c543f8e90f3ce1b6ca102134dbb73729 Mon Sep 17 
00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 15:08:13 -0400 Subject: [PATCH 26/38] fsx: Consolidate OpenZFS flatteners and expanders. --- internal/service/fsx/openzfs_file_system.go | 147 +++----------------- internal/service/fsx/openzfs_volume.go | 6 +- 2 files changed, 22 insertions(+), 131 deletions(-) diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index f2c28c95a1c..c02c32af978 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -385,8 +385,8 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("disk_iops_configuration"); ok { - inputC.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) - inputB.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) + inputC.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(v.([]interface{})) } if v, ok := d.GetOk("endpoint_ip_address_range"); ok { @@ -405,8 +405,8 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("root_volume_configuration"); ok { - inputC.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) - inputB.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) + inputC.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSCreateRootVolumeConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSCreateRootVolumeConfiguration(v.([]interface{})) } if v, ok := d.GetOk("route_table_ids"); ok { @@ -481,7 +481,7 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, d.Set("copy_tags_to_volumes", 
openZFSConfig.CopyTagsToVolumes) d.Set("daily_automatic_backup_start_time", openZFSConfig.DailyAutomaticBackupStartTime) d.Set("deployment_type", openZFSConfig.DeploymentType) - if err := d.Set("disk_iops_configuration", flattenOpenZFSFileDiskIopsConfiguration(openZFSConfig.DiskIopsConfiguration)); err != nil { + if err := d.Set("disk_iops_configuration", flattenDiskIopsConfiguration(openZFSConfig.DiskIopsConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting disk_iops_configuration: %s", err) } d.Set("dns_name", filesystem.DNSName) @@ -508,7 +508,7 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS File System (%s) root volume (%s): %s", d.Id(), rootVolumeID, err) } - if err := d.Set("root_volume_configuration", flattenOpenZFSRootVolumeConfiguration(rootVolume)); err != nil { + if err := d.Set("root_volume_configuration", flattenOpenZFSFileSystemRootVolume(rootVolume)); err != nil { return sdkdiag.AppendErrorf(diags, "setting root_volume_configuration: %s", err) } @@ -543,7 +543,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("disk_iops_configuration") { - input.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) + input.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) } if d.HasChange("route_table_ids") { @@ -590,7 +590,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData rootVolumeID := d.Get("root_volume_id").(string) input := &fsx.UpdateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), - OpenZFSConfiguration: expandOpenZFSUpdateRootVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})), + OpenZFSConfiguration: 
expandUpdateOpenZFSVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})), VolumeId: aws.String(rootVolumeID), } @@ -638,7 +638,7 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData return diags } -func expandOpenZFSFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { +func expandDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { if len(cfg) < 1 { return nil } @@ -658,7 +658,7 @@ func expandOpenZFSFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConf return &out } -func expandOpenZFSRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { +func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -684,17 +684,17 @@ func expandOpenZFSRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateR } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenZFSNFSExports(v) + out.NfsExports = expandOpenZFSNfsExports(v) } return &out } -func expandOpenZFSUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { +func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -716,88 +716,17 @@ func expandOpenZFSUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOp } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenZFSNFSExports(v) + out.NfsExports = 
expandOpenZFSNfsExports(v) } return &out } -func expandOpenZFSUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { - quotas := []*fsx.OpenZFSUserOrGroupQuota{} - - for _, quota := range cfg { - expandedQuota := expandOpenZFSUserAndGroupQuota(quota.(map[string]interface{})) - if expandedQuota != nil { - quotas = append(quotas, expandedQuota) - } - } - - return quotas -} - -func expandOpenZFSUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { - if len(conf) < 1 { - return nil - } - - out := fsx.OpenZFSUserOrGroupQuota{} - - if v, ok := conf["id"].(int); ok { - out.Id = aws.Int64(int64(v)) - } - - if v, ok := conf["storage_capacity_quota_gib"].(int); ok { - out.StorageCapacityQuotaGiB = aws.Int64(int64(v)) - } - - if v, ok := conf["type"].(string); ok { - out.Type = aws.String(v) - } - - return &out -} - -func expandOpenZFSNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { - exports := []*fsx.OpenZFSNfsExport{} - - for _, export := range cfg { - expandedExport := expandOpenZFSNFSExport(export.(map[string]interface{})) - if expandedExport != nil { - exports = append(exports, expandedExport) - } - } - - return exports -} - -func expandOpenZFSNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { - out := fsx.OpenZFSNfsExport{} - - if v, ok := cfg["client_configurations"]; ok { - out.ClientConfigurations = expandOpenZFSClinetConfigurations(v.(*schema.Set).List()) - } - - return &out -} - -func expandOpenZFSClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { - configurations := []*fsx.OpenZFSClientConfiguration{} - - for _, configuration := range cfg { - expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) - if expandedConfiguration != nil { - configurations = append(configurations, expandedConfiguration) - } - } - - return configurations -} - -func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { +func 
flattenDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -813,7 +742,7 @@ func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []in return []interface{}{m} } -func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { +func flattenOpenZFSFileSystemRootVolume(rs *fsx.Volume) []interface{} { if rs == nil { return []interface{}{} } @@ -826,7 +755,7 @@ func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType) } if rs.OpenZFSConfiguration.NfsExports != nil { - m["nfs_exports"] = flattenOpenZFSFileNFSExports(rs.OpenZFSConfiguration.NfsExports) + m["nfs_exports"] = flattenOpenZFSNfsExports(rs.OpenZFSConfiguration.NfsExports) } if rs.OpenZFSConfiguration.ReadOnly != nil { m["read_only"] = aws.BoolValue(rs.OpenZFSConfiguration.ReadOnly) @@ -835,50 +764,12 @@ func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["record_size_kib"] = aws.Int64Value(rs.OpenZFSConfiguration.RecordSizeKiB) } if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil { - m["user_and_group_quotas"] = flattenOpenZFSFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) + m["user_and_group_quotas"] = flattenOpenZFSUserOrGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) } return []interface{}{m} } -func flattenOpenZFSFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { - exports := make([]map[string]interface{}, 0) - - for _, export := range rs { - if export != nil { - cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) - exports = append(exports, cfg) - } - } - - if len(exports) > 0 { - return exports - } - - return nil -} - -func flattenOpenZFSFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { - quotas := 
make([]map[string]interface{}, 0) - - for _, quota := range rs { - if quota != nil { - cfg := make(map[string]interface{}) - cfg["id"] = aws.Int64Value(quota.Id) - cfg["storage_capacity_quota_gib"] = aws.Int64Value(quota.StorageCapacityQuotaGiB) - cfg["type"] = aws.StringValue(quota.Type) - quotas = append(quotas, cfg) - } - } - - if len(quotas) > 0 { - return quotas - } - - return nil -} - func FindOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeOpenzfs) diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index f798c7ec10d..af53d622e4f 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -423,7 +423,7 @@ func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUser return &out } -func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name exports := []*fsx.OpenZFSNfsExport{} for _, export := range cfg { @@ -436,7 +436,7 @@ func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { return exports } -func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name out := fsx.OpenZFSNfsExport{} if v, ok := cfg["client_configurations"]; ok { @@ -493,7 +493,7 @@ func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *fsx.Crea return &out } -func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { +func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { // nosemgrep:ci.caps4-in-func-name exports := make([]map[string]interface{}, 0) for _, export := range rs { From a923621ff6c29a14b623d53a4953d6ad2f191199 Mon 
Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 23 Sep 2023 18:40:43 -0400 Subject: [PATCH 27/38] Acceptance test output: % ACCTEST_TIMEOUT=720m make testacc TESTARGS='-run=TestAccFSxOpenZFSVolume_\|TestAccFSxOpenZFSFileSystem_' PKG=fsx ACCTEST_PARALLELISM=3 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/fsx/... -v -count 1 -parallel 3 -run=TestAccFSxOpenZFSVolume_\|TestAccFSxOpenZFSFileSystem_ -timeout 720m === RUN TestAccFSxOpenZFSFileSystem_basic === PAUSE TestAccFSxOpenZFSFileSystem_basic === RUN TestAccFSxOpenZFSFileSystem_diskIops === PAUSE TestAccFSxOpenZFSFileSystem_diskIops === RUN TestAccFSxOpenZFSFileSystem_disappears === PAUSE TestAccFSxOpenZFSFileSystem_disappears === RUN TestAccFSxOpenZFSFileSystem_rootVolume === PAUSE TestAccFSxOpenZFSFileSystem_rootVolume === RUN TestAccFSxOpenZFSFileSystem_securityGroupIDs === PAUSE TestAccFSxOpenZFSFileSystem_securityGroupIDs === RUN TestAccFSxOpenZFSFileSystem_tags === PAUSE TestAccFSxOpenZFSFileSystem_tags === RUN TestAccFSxOpenZFSFileSystem_copyTags === PAUSE TestAccFSxOpenZFSFileSystem_copyTags === RUN TestAccFSxOpenZFSFileSystem_throughput === PAUSE TestAccFSxOpenZFSFileSystem_throughput === RUN TestAccFSxOpenZFSFileSystem_storageType === PAUSE TestAccFSxOpenZFSFileSystem_storageType === RUN TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime === PAUSE TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime === RUN TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays === PAUSE TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays === RUN TestAccFSxOpenZFSFileSystem_kmsKeyID === PAUSE TestAccFSxOpenZFSFileSystem_kmsKeyID === RUN TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime === PAUSE TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime === RUN TestAccFSxOpenZFSFileSystem_throughputCapacity === PAUSE TestAccFSxOpenZFSFileSystem_throughputCapacity === RUN TestAccFSxOpenZFSFileSystem_storageCapacity === PAUSE 
TestAccFSxOpenZFSFileSystem_storageCapacity === RUN TestAccFSxOpenZFSFileSystem_deploymentType === PAUSE TestAccFSxOpenZFSFileSystem_deploymentType === RUN TestAccFSxOpenZFSFileSystem_multiAZ === PAUSE TestAccFSxOpenZFSFileSystem_multiAZ === RUN TestAccFSxOpenZFSFileSystem_routeTableIDs === PAUSE TestAccFSxOpenZFSFileSystem_routeTableIDs === RUN TestAccFSxOpenZFSVolume_basic === PAUSE TestAccFSxOpenZFSVolume_basic === RUN TestAccFSxOpenZFSVolume_disappears === PAUSE TestAccFSxOpenZFSVolume_disappears === RUN TestAccFSxOpenZFSVolume_parentVolume === PAUSE TestAccFSxOpenZFSVolume_parentVolume === RUN TestAccFSxOpenZFSVolume_tags === PAUSE TestAccFSxOpenZFSVolume_tags === RUN TestAccFSxOpenZFSVolume_copyTags === PAUSE TestAccFSxOpenZFSVolume_copyTags === RUN TestAccFSxOpenZFSVolume_name === PAUSE TestAccFSxOpenZFSVolume_name === RUN TestAccFSxOpenZFSVolume_dataCompressionType === PAUSE TestAccFSxOpenZFSVolume_dataCompressionType === RUN TestAccFSxOpenZFSVolume_readOnly === PAUSE TestAccFSxOpenZFSVolume_readOnly === RUN TestAccFSxOpenZFSVolume_recordSizeKib === PAUSE TestAccFSxOpenZFSVolume_recordSizeKib === RUN TestAccFSxOpenZFSVolume_storageCapacity === PAUSE TestAccFSxOpenZFSVolume_storageCapacity === RUN TestAccFSxOpenZFSVolume_nfsExports === PAUSE TestAccFSxOpenZFSVolume_nfsExports === RUN TestAccFSxOpenZFSVolume_userAndGroupQuotas === PAUSE TestAccFSxOpenZFSVolume_userAndGroupQuotas === CONT TestAccFSxOpenZFSFileSystem_basic === CONT TestAccFSxOpenZFSFileSystem_deploymentType === CONT TestAccFSxOpenZFSFileSystem_storageType --- PASS: TestAccFSxOpenZFSFileSystem_storageType (678.10s) === CONT TestAccFSxOpenZFSFileSystem_securityGroupIDs --- PASS: TestAccFSxOpenZFSFileSystem_basic (779.98s) === CONT TestAccFSxOpenZFSFileSystem_throughput --- PASS: TestAccFSxOpenZFSFileSystem_deploymentType (1501.91s) === CONT TestAccFSxOpenZFSFileSystem_copyTags --- PASS: TestAccFSxOpenZFSFileSystem_securityGroupIDs (1359.14s) === CONT TestAccFSxOpenZFSFileSystem_tags --- PASS: 
TestAccFSxOpenZFSFileSystem_throughput (1289.23s) === CONT TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime --- PASS: TestAccFSxOpenZFSFileSystem_copyTags (772.71s) === CONT TestAccFSxOpenZFSFileSystem_storageCapacity --- PASS: TestAccFSxOpenZFSFileSystem_tags (734.05s) === CONT TestAccFSxOpenZFSFileSystem_throughputCapacity --- PASS: TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime (744.93s) === CONT TestAccFSxOpenZFSVolume_name --- PASS: TestAccFSxOpenZFSFileSystem_storageCapacity (842.47s) === CONT TestAccFSxOpenZFSVolume_userAndGroupQuotas openzfs_volume_test.go:448: Step 1/3 error: After applying this test step and performing a `terraform refresh`, the plan was not empty. stdout Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_fsx_openzfs_volume.test will be updated in-place ~ resource "aws_fsx_openzfs_volume" "test" { id = "fsvol-0d1449f0ba72cf246" name = "tf-acc-test-8593924653345881072" tags = {} # (10 unchanged attributes hidden) - user_and_group_quotas { - id = 0 -> null - storage_capacity_quota_gib = 0 -> null - type = "GROUP" -> null } - user_and_group_quotas { - id = 0 -> null - storage_capacity_quota_gib = 0 -> null - type = "USER" -> null } # (2 unchanged blocks hidden) } Plan: 0 to add, 1 to change, 0 to destroy. 
--- PASS: TestAccFSxOpenZFSVolume_name (1057.51s) === CONT TestAccFSxOpenZFSVolume_nfsExports --- FAIL: TestAccFSxOpenZFSVolume_userAndGroupQuotas (778.11s) === CONT TestAccFSxOpenZFSVolume_storageCapacity --- PASS: TestAccFSxOpenZFSFileSystem_throughputCapacity (1269.32s) === CONT TestAccFSxOpenZFSVolume_recordSizeKib --- PASS: TestAccFSxOpenZFSVolume_nfsExports (1032.34s) === CONT TestAccFSxOpenZFSVolume_readOnly --- PASS: TestAccFSxOpenZFSVolume_storageCapacity (1022.60s) === CONT TestAccFSxOpenZFSVolume_dataCompressionType --- PASS: TestAccFSxOpenZFSVolume_recordSizeKib (1044.49s) === CONT TestAccFSxOpenZFSVolume_disappears --- PASS: TestAccFSxOpenZFSVolume_disappears (815.04s) === CONT TestAccFSxOpenZFSVolume_copyTags --- PASS: TestAccFSxOpenZFSVolume_dataCompressionType (1001.99s) === CONT TestAccFSxOpenZFSVolume_tags --- PASS: TestAccFSxOpenZFSVolume_readOnly (1021.45s) === CONT TestAccFSxOpenZFSVolume_parentVolume --- PASS: TestAccFSxOpenZFSVolume_tags (882.95s) === CONT TestAccFSxOpenZFSFileSystem_multiAZ --- PASS: TestAccFSxOpenZFSVolume_parentVolume (896.20s) === CONT TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays --- PASS: TestAccFSxOpenZFSVolume_copyTags (967.24s) === CONT TestAccFSxOpenZFSFileSystem_kmsKeyID --- PASS: TestAccFSxOpenZFSFileSystem_kmsKeyID (733.53s) === CONT TestAccFSxOpenZFSVolume_basic --- PASS: TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays (886.28s) === CONT TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime --- PASS: TestAccFSxOpenZFSFileSystem_multiAZ (988.42s) === CONT TestAccFSxOpenZFSFileSystem_disappears --- PASS: TestAccFSxOpenZFSVolume_basic (822.03s) === CONT TestAccFSxOpenZFSFileSystem_rootVolume --- PASS: TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime (821.43s) === CONT TestAccFSxOpenZFSFileSystem_routeTableIDs --- PASS: TestAccFSxOpenZFSFileSystem_disappears (748.71s) === CONT TestAccFSxOpenZFSFileSystem_diskIops --- PASS: TestAccFSxOpenZFSFileSystem_diskIops (783.62s) --- PASS: 
TestAccFSxOpenZFSFileSystem_routeTableIDs (1109.79s) --- PASS: TestAccFSxOpenZFSFileSystem_rootVolume (1536.94s) FAIL FAIL github.com/hashicorp/terraform-provider-aws/internal/service/fsx 9964.705s FAIL make: *** [testacc] Error 1 Failure is unrelated to this change and occurs in CI. From f8a0aa5d3d56ec991985739909b6d0b974a76ccd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 24 Sep 2023 11:19:57 -0400 Subject: [PATCH 28/38] r/aws_fsx_ontap_volume: Add 'snaplock_configuration' argument. --- .changelog/32530.txt | 4 + internal/service/fsx/ontap_volume.go | 406 ++++++++++++++++-- internal/service/fsx/ontap_volume_test.go | 2 + .../r/fsx_ontap_file_system.html.markdown | 2 +- website/docs/r/fsx_ontap_volume.html.markdown | 30 +- 5 files changed, 415 insertions(+), 29 deletions(-) diff --git a/.changelog/32530.txt b/.changelog/32530.txt index db6d3e812f9..32a51c986a9 100644 --- a/.changelog/32530.txt +++ b/.changelog/32530.txt @@ -1,3 +1,7 @@ ```release-note:enhancement resource/aws_fsx_ontap_volume: Add `copy_tags_to_backups` and `snapshot_policy` arguments ``` + +```release-note:enhancement +resource/aws_fsx_ontap_volume: Add `snaplock_configuration` configuration block to support [SnapLock](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snaplock.html) +``` diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index f3633256e07..43c6cc6124b 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -102,6 +102,122 @@ func ResourceONTAPVolume() *schema.Resource { Optional: true, Default: false, }, + "snaplock_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_log_volume": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "autocommit_period": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { 
+ Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.AutocommitPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + }, + }, + "privileged_delete": { + Type: schema.TypeString, + Optional: true, + Default: fsx.PrivilegedDeleteDisabled, + ValidateFunc: validation.StringInSlice(fsx.PrivilegedDelete_Values(), false), + }, + "retention_period": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_retention": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + "maximum_retention": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + "minimum_retention": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + }, + }, + }, + "snaplock_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
validation.StringInSlice(fsx.SnaplockType_Values(), false), + }, + "volume_append_mode_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, "snapshot_policy": { Type: schema.TypeString, Optional: true, @@ -189,6 +305,10 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.SecurityStyle = aws.String(v.(string)) } + if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.OntapConfiguration.SnaplockConfiguration = expandCreateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + if v, ok := d.GetOk("snapshot_policy"); ok { input.OntapConfiguration.SnapshotPolicy = aws.String(v.(string)) } @@ -197,8 +317,8 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("tiering_policy"); ok { - input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})) + if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) } output, err := conn.CreateVolumeWithContext(ctx, input) @@ -242,11 +362,22 @@ func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("ontap_volume_type", ontapConfig.OntapVolumeType) d.Set("security_style", ontapConfig.SecurityStyle) d.Set("size_in_megabytes", ontapConfig.SizeInMegabytes) + if ontapConfig.SnaplockConfiguration != nil { + if err := d.Set("snaplock_configuration", []interface{}{flattenSnaplockConfiguration(ontapConfig.SnaplockConfiguration)}); err != nil { + return diag.Errorf("setting snaplock_configuration: %s", err) + } + } else { + d.Set("snaplock_configuration", nil) + } d.Set("snapshot_policy", ontapConfig.SnapshotPolicy) 
d.Set("storage_efficiency_enabled", ontapConfig.StorageEfficiencyEnabled) d.Set("storage_virtual_machine_id", ontapConfig.StorageVirtualMachineId) - if err := d.Set("tiering_policy", flattenTieringPolicy(ontapConfig.TieringPolicy)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tiering_policy: %s", err) + if ontapConfig.TieringPolicy != nil { + if err := d.Set("tiering_policy", []interface{}{flattenTieringPolicy(ontapConfig.TieringPolicy)}); err != nil { + return diag.Errorf("setting tiering_policy: %s", err) + } + } else { + d.Set("tiering_policy", nil) } d.Set("uuid", ontapConfig.UUID) d.Set("volume_type", volume.VolumeType) @@ -281,6 +412,12 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta input.OntapConfiguration.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) } + if d.HasChange("snaplock_configuration") { + if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.OntapConfiguration.SnaplockConfiguration = expandUpdateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + } + if d.HasChange("snapshot_policy") { input.OntapConfiguration.SnapshotPolicy = aws.String(d.Get("snapshot_policy").(string)) } @@ -290,7 +427,9 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("tiering_policy") { - input.OntapConfiguration.TieringPolicy = expandTieringPolicy(d.Get("tiering_policy").([]interface{})) + if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) + } } startTime := time.Now() @@ -339,45 +478,260 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandTieringPolicy(cfg []interface{}) *fsx.TieringPolicy { - if len(cfg) < 1 { +const 
minTieringPolicyCoolingPeriod = 2 + +func expandTieringPolicy(tfMap map[string]interface{}) *fsx.TieringPolicy { + if tfMap == nil { + return nil + } + + apiObject := &fsx.TieringPolicy{} + + // Cooling period only accepts a minimum of 2 but int will return 0 not nil if unset. + // Therefore we only set it if it is 2 or more. + if v, ok := tfMap["cooling_period"].(int); ok && v >= minTieringPolicyCoolingPeriod { + apiObject.CoolingPeriod = aws.Int64(int64(v)) + } + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + return apiObject +} + +func flattenTieringPolicy(apiObject *fsx.TieringPolicy) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CoolingPeriod; v != nil { + if v := aws.Int64Value(v); v >= minTieringPolicyCoolingPeriod { + tfMap["cooling_period"] = v + } + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + return tfMap +} + +func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.CreateSnaplockConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &fsx.CreateSnaplockConfiguration{} + + if v, ok := tfMap["audit_log_volume"].(bool); ok && v { + apiObject.AuditLogVolume = aws.Bool(v) + } + + if v, ok := tfMap["autocommit_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AutocommitPeriod = expandAutocommitPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { + apiObject.PrivilegedDelete = aws.String(v) + } + + if v, ok := tfMap["retention_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetentionPeriod = expandSnaplockRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["snaplock_type"].(string); ok && v != "" { + apiObject.SnaplockType = aws.String(v) + } + + if v, ok := tfMap["volume_append_mode_enabled"].(bool); ok && v { + 
apiObject.VolumeAppendModeEnabled = aws.Bool(v) + } + + return apiObject +} + +func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.UpdateSnaplockConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &fsx.UpdateSnaplockConfiguration{} + + if v, ok := tfMap["audit_log_volume"].(bool); ok && v { + apiObject.AuditLogVolume = aws.Bool(v) + } + + if v, ok := tfMap["autocommit_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AutocommitPeriod = expandAutocommitPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { + apiObject.PrivilegedDelete = aws.String(v) + } + + if v, ok := tfMap["retention_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetentionPeriod = expandSnaplockRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["volume_append_mode_enabled"].(bool); ok && v { + apiObject.VolumeAppendModeEnabled = aws.Bool(v) + } + + return apiObject +} + +func expandAutocommitPeriod(tfMap map[string]interface{}) *fsx.AutocommitPeriod { + if tfMap == nil { + return nil + } + + apiObject := &fsx.AutocommitPeriod{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = aws.String(v) + } + + if v, ok := tfMap["value"].(int); ok && v != 0 { + apiObject.Value = aws.Int64(int64(v)) + } + + return apiObject +} + +func expandSnaplockRetentionPeriod(tfMap map[string]interface{}) *fsx.SnaplockRetentionPeriod { + if tfMap == nil { return nil } - conf := cfg[0].(map[string]interface{}) + apiObject := &fsx.SnaplockRetentionPeriod{} + + if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DefaultRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.MaximumRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := 
tfMap["minimum_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.MinimumRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandRetentionPeriod(tfMap map[string]interface{}) *fsx.RetentionPeriod { + if tfMap == nil { + return nil + } - out := fsx.TieringPolicy{} + apiObject := &fsx.RetentionPeriod{} - //Cooling period only accepts a minimum of 2 but int will return 0 not nil if unset - //Therefore we only set it if it is 2 or more - if v, ok := conf["cooling_period"].(int); ok && v >= 2 { - out.CoolingPeriod = aws.Int64(int64(v)) + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = aws.String(v) } - if v, ok := conf["name"].(string); ok { - out.Name = aws.String(v) + if v, ok := tfMap["value"].(int); ok && v != 0 { + apiObject.Value = aws.Int64(int64(v)) } - return &out + return apiObject } -func flattenTieringPolicy(rs *fsx.TieringPolicy) []interface{} { - if rs == nil { - return []interface{}{} +func flattenSnaplockConfiguration(apiObject *fsx.SnaplockConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AuditLogVolume; v != nil { + tfMap["audit_log_volume"] = aws.BoolValue(v) + } + + if v := apiObject.AutocommitPeriod; v != nil { + tfMap["nested_attribute_name"] = []interface{}{flattenAutocommitPeriod(v)} + } + + if v := apiObject.PrivilegedDelete; v != nil { + tfMap["privileged_delete"] = aws.StringValue(v) + } + + if v := apiObject.RetentionPeriod; v != nil { + tfMap["retention_period"] = []interface{}{flattenSnaplockRetentionPeriod(v)} + } + + if v := apiObject.SnaplockType; v != nil { + tfMap["snaplock_type"] = aws.StringValue(v) + } + + if v := apiObject.VolumeAppendModeEnabled; v != nil { + tfMap["volume_append_mode_enabled"] = aws.BoolValue(v) + } + + return tfMap +} + +func flattenAutocommitPeriod(apiObject *fsx.AutocommitPeriod) map[string]interface{} { + if apiObject == 
nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != nil { + tfMap["type"] = aws.StringValue(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.Int64Value(v) + } + + return tfMap +} + +func flattenSnaplockRetentionPeriod(apiObject *fsx.SnaplockRetentionPeriod) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.DefaultRetention; v != nil { + tfMap["default_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + if v := apiObject.MaximumRetention; v != nil { + tfMap["maximum_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + if v := apiObject.MinimumRetention; v != nil { + tfMap["minimum_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + return tfMap +} + +func flattenRetentionPeriod(apiObject *fsx.RetentionPeriod) map[string]interface{} { + if apiObject == nil { + return nil } - minCoolingPeriod := 2 + tfMap := map[string]interface{}{} - m := make(map[string]interface{}) - if aws.Int64Value(rs.CoolingPeriod) >= int64(minCoolingPeriod) { - m["cooling_period"] = aws.Int64Value(rs.CoolingPeriod) + if v := apiObject.Type; v != nil { + tfMap["type"] = aws.StringValue(v) } - if rs.Name != nil { - m["name"] = aws.StringValue(rs.Name) + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.Int64Value(v) } - return []interface{}{m} + return tfMap } func FindONTAPVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 7609d2897a9..1b01b803a8a 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -45,10 +45,12 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_style", ""), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", "1024"), 
resource.TestCheckResourceAttr(resourceName, "skip_final_backup", "false"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_policy", "default"), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "storage_virtual_machine_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tiering_policy.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "uuid"), resource.TestCheckResourceAttr(resourceName, "volume_type", "ONTAP"), ), diff --git a/website/docs/r/fsx_ontap_file_system.html.markdown b/website/docs/r/fsx_ontap_file_system.html.markdown index 3ce1a2d55bc..c262d366b88 100644 --- a/website/docs/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/r/fsx_ontap_file_system.html.markdown @@ -36,7 +36,7 @@ This resource supports the following arguments: * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. -* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) Below. +* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) below. 
* `endpoint_ip_address_range` - (Optional) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. * `storage_type` - (Optional) - The filesystem storage type. defaults to `SSD`. * `fsx_admin_password` - (Optional) The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 9652df875b9..77b41b3ff94 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -55,14 +55,40 @@ This resource supports the following arguments: * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. * `size_in_megabytes` - (Required) Specifies the size of the volume, in megabytes (MB), that you are creating. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the volume is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. +* `snaplock_configuration` - (Optional) The SnapLock configuration for an FSx for ONTAP volume. See [SnapLock Configuration](#snaplock-configuration) below. * `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. See [snapshot policies](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies) in the Amazon FSx ONTAP User Guide * `storage_efficiency_enabled` - (Optional) Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume. * `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. * `tags` - (Optional) A map of tags to assign to the volume. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tiering_policy` - (Optional) The data tiering policy for an FSx for ONTAP volume. See [Tiering Policy](#tiering-policy) below. -### tiering_policy +### SnapLock Configuration -The `tiering_policy` configuration block supports the following arguments: +* `audit_log_volume` - (Optional) Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default value is `false`. +* `autocommit_period` - (Optional) The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume. See [Autocommit Period](#autocommit-period) below. +* `privileged_delete` - (Optional) Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Valid values: `DISABLED`, `ENABLED`, `PERMANENTLY_DISABLED`. The default value is `DISABLED`. +* `retention_period` - (Optional) The retention period of an FSx for ONTAP SnapLock volume. See [SnapLock Retention Period](#snaplock-retention-period) below. +* `snaplock_type` - (Required) Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. Valid values: `COMPLIANCE`, `ENTERPRISE`. +* `volume_append_mode_enabled` - (Optional) Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. The default value is `false`. + +### Autocommit Period + +* `type` - (Required) The type of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. Setting this value to `NONE` disables autocommit. Valid values: `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `NONE`. +* `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. 
+ +### SnapLock Retention Period + +* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default +if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [Retention Period](#retention-period) below. +* `maximum_retention` - (Required) The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. +* `minimum_retention` - (Required) The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. + +### Retention Period + +* `type` - (Required) The type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to `INFINITE`, the files are retained forever. If you set it to `UNSPECIFIED`, the files are retained until you set an explicit retention period. Valid values: `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `INFINITE`, `UNSPECIFIED`. +* `value` - (Optional) The amount of time for the retention period of an FSx for ONTAP SnapLock volume. + +### Tiering Policy * `name` - (Required) Specifies the tiering policy for the ONTAP volume for moving data to the capacity pool storage. Valid values are `SNAPSHOT_ONLY`, `AUTO`, `ALL`, `NONE`. Default value is `SNAPSHOT_ONLY`. * `cooling_period` - (Optional) Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with `AUTO` and `SNAPSHOT_ONLY` tiering policies only. Valid values are whole numbers between 2 and 183. Default values are 31 days for `AUTO` and 2 days for `SNAPSHOT_ONLY`. 
From d1d34a1eb22997bff6e84757510d1614ae941de8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 24 Sep 2023 11:24:24 -0400 Subject: [PATCH 29/38] fsx: Cosmetics. --- internal/service/fsx/ontap_volume.go | 64 ++++++++++++----------- internal/service/fsx/openzfs_volume.go | 72 ++++++++++++++------------ 2 files changed, 72 insertions(+), 64 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 43c6cc6124b..66ac9371d57 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -278,47 +278,49 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - name := d.Get("name").(string) - input := &fsx.CreateVolumeInput{ - Name: aws.String(name), - OntapConfiguration: &fsx.CreateOntapVolumeConfiguration{ - SizeInMegabytes: aws.Int64(int64(d.Get("size_in_megabytes").(int))), - StorageVirtualMachineId: aws.String(d.Get("storage_virtual_machine_id").(string)), - }, - Tags: getTagsIn(ctx), - VolumeType: aws.String(d.Get("volume_type").(string)), + ontapConfig := &fsx.CreateOntapVolumeConfiguration{ + SizeInMegabytes: aws.Int64(int64(d.Get("size_in_megabytes").(int))), + StorageVirtualMachineId: aws.String(d.Get("storage_virtual_machine_id").(string)), } if v, ok := d.GetOk("copy_tags_to_backups"); ok { - input.OntapConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + ontapConfig.CopyTagsToBackups = aws.Bool(v.(bool)) } if v, ok := d.GetOk("junction_path"); ok { - input.OntapConfiguration.JunctionPath = aws.String(v.(string)) + ontapConfig.JunctionPath = aws.String(v.(string)) } if v, ok := d.GetOk("ontap_volume_type"); ok { - input.OntapConfiguration.OntapVolumeType = aws.String(v.(string)) + ontapConfig.OntapVolumeType = aws.String(v.(string)) } if v, ok := d.GetOk("security_style"); ok { - input.OntapConfiguration.SecurityStyle = aws.String(v.(string)) + ontapConfig.SecurityStyle = 
aws.String(v.(string)) } if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.OntapConfiguration.SnaplockConfiguration = expandCreateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + ontapConfig.SnaplockConfiguration = expandCreateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) } if v, ok := d.GetOk("snapshot_policy"); ok { - input.OntapConfiguration.SnapshotPolicy = aws.String(v.(string)) + ontapConfig.SnapshotPolicy = aws.String(v.(string)) } if v, ok := d.GetOkExists("storage_efficiency_enabled"); ok { - input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(v.(bool)) + ontapConfig.StorageEfficiencyEnabled = aws.Bool(v.(bool)) } if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) + ontapConfig.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) + } + + name := d.Get("name").(string) + input := &fsx.CreateVolumeInput{ + Name: aws.String(name), + OntapConfiguration: ontapConfig, + Tags: getTagsIn(ctx), + VolumeType: aws.String(d.Get("volume_type").(string)), } output, err := conn.CreateVolumeWithContext(ctx, input) @@ -390,48 +392,50 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).FSxConn(ctx) if d.HasChangesExcept("tags", "tags_all") { - input := &fsx.UpdateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - OntapConfiguration: &fsx.UpdateOntapVolumeConfiguration{}, - VolumeId: aws.String(d.Id()), - } + ontapConfig := &fsx.UpdateOntapVolumeConfiguration{} if d.HasChange("copy_tags_to_backups") { - input.OntapConfiguration.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) + ontapConfig.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) } if 
d.HasChange("junction_path") { - input.OntapConfiguration.JunctionPath = aws.String(d.Get("junction_path").(string)) + ontapConfig.JunctionPath = aws.String(d.Get("junction_path").(string)) } if d.HasChange("security_style") { - input.OntapConfiguration.SecurityStyle = aws.String(d.Get("security_style").(string)) + ontapConfig.SecurityStyle = aws.String(d.Get("security_style").(string)) } if d.HasChange("size_in_megabytes") { - input.OntapConfiguration.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) + ontapConfig.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) } if d.HasChange("snaplock_configuration") { if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.OntapConfiguration.SnaplockConfiguration = expandUpdateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + ontapConfig.SnaplockConfiguration = expandUpdateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) } } if d.HasChange("snapshot_policy") { - input.OntapConfiguration.SnapshotPolicy = aws.String(d.Get("snapshot_policy").(string)) + ontapConfig.SnapshotPolicy = aws.String(d.Get("snapshot_policy").(string)) } if d.HasChange("storage_efficiency_enabled") { - input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(d.Get("storage_efficiency_enabled").(bool)) + ontapConfig.StorageEfficiencyEnabled = aws.Bool(d.Get("storage_efficiency_enabled").(bool)) } if d.HasChange("tiering_policy") { if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.OntapConfiguration.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) + ontapConfig.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) } } + input := &fsx.UpdateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + OntapConfiguration: ontapConfig, + VolumeId: aws.String(d.Id()), + } + startTime := 
time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index af53d622e4f..98b8865d84d 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -199,51 +199,53 @@ func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, me var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - name := d.Get("name").(string) - input := &fsx.CreateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - Name: aws.String(name), - OpenZFSConfiguration: &fsx.CreateOpenZFSVolumeConfiguration{ - ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), - }, - Tags: getTagsIn(ctx), - VolumeType: aws.String(d.Get("volume_type").(string)), + openzfsConfig := &fsx.CreateOpenZFSVolumeConfiguration{ + ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), } if v, ok := d.GetOk("copy_tags_to_snapshots"); ok { - input.OpenZFSConfiguration.CopyTagsToSnapshots = aws.Bool(v.(bool)) + openzfsConfig.CopyTagsToSnapshots = aws.Bool(v.(bool)) } if v, ok := d.GetOk("data_compression_type"); ok { - input.OpenZFSConfiguration.DataCompressionType = aws.String(v.(string)) + openzfsConfig.DataCompressionType = aws.String(v.(string)) } if v, ok := d.GetOk("nfs_exports"); ok { - input.OpenZFSConfiguration.NfsExports = expandOpenZFSNfsExports(v.([]interface{})) + openzfsConfig.NfsExports = expandOpenZFSNfsExports(v.([]interface{})) } if v, ok := d.GetOk("origin_snapshot"); ok { - input.OpenZFSConfiguration.OriginSnapshot = expandCreateOpenZFSOriginSnapshotConfiguration(v.([]interface{})) + openzfsConfig.OriginSnapshot = expandCreateOpenZFSOriginSnapshotConfiguration(v.([]interface{})) } if v, ok := d.GetOk("read_only"); ok { - input.OpenZFSConfiguration.ReadOnly = aws.Bool(v.(bool)) + openzfsConfig.ReadOnly = aws.Bool(v.(bool)) } if v, ok := d.GetOk("record_size_kib"); ok { - 
input.OpenZFSConfiguration.RecordSizeKiB = aws.Int64(int64(v.(int))) + openzfsConfig.RecordSizeKiB = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("storage_capacity_quota_gib"); ok { - input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("storage_capacity_reservation_gib"); ok { - input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("user_and_group_quotas"); ok { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) + openzfsConfig.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) + } + + name := d.Get("name").(string) + input := &fsx.CreateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + Name: aws.String(name), + OpenZFSConfiguration: openzfsConfig, + Tags: getTagsIn(ctx), + VolumeType: aws.String(d.Get("volume_type").(string)), } output, err := conn.CreateVolumeWithContext(ctx, input) @@ -307,42 +309,44 @@ func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, me conn := meta.(*conns.AWSClient).FSxConn(ctx) if d.HasChangesExcept("tags", "tags_all") { - input := &fsx.UpdateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{}, - VolumeId: aws.String(d.Id()), - } + openzfsConfig := &fsx.UpdateOpenZFSVolumeConfiguration{} if d.HasChange("data_compression_type") { - input.OpenZFSConfiguration.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) - } - - if d.HasChange("name") { - input.Name = aws.String(d.Get("name").(string)) + openzfsConfig.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) } if d.HasChange("nfs_exports") { - input.OpenZFSConfiguration.NfsExports = 
expandOpenZFSNfsExports(d.Get("nfs_exports").([]interface{})) + openzfsConfig.NfsExports = expandOpenZFSNfsExports(d.Get("nfs_exports").([]interface{})) } if d.HasChange("read_only") { - input.OpenZFSConfiguration.ReadOnly = aws.Bool(d.Get("read_only").(bool)) + openzfsConfig.ReadOnly = aws.Bool(d.Get("read_only").(bool)) } if d.HasChange("record_size_kib") { - input.OpenZFSConfiguration.RecordSizeKiB = aws.Int64(int64(d.Get("record_size_kib").(int))) + openzfsConfig.RecordSizeKiB = aws.Int64(int64(d.Get("record_size_kib").(int))) } if d.HasChange("storage_capacity_quota_gib") { - input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int))) } if d.HasChange("storage_capacity_reservation_gib") { - input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int))) } if d.HasChange("user_and_group_quotas") { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) + openzfsConfig.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) + } + + input := &fsx.UpdateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + OpenZFSConfiguration: openzfsConfig, + VolumeId: aws.String(d.Id()), + } + + if d.HasChange("name") { + input.Name = aws.String(d.Get("name").(string)) } startTime := time.Now() From a4b8d57b67708fe861d0ff3cd4c8f79ec77d3196 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 24 Sep 2023 14:55:37 -0400 Subject: [PATCH 30/38] Add 'TestAccFSxONTAPVolume_snaplock'. 
--- internal/service/fsx/ontap_volume_test.go | 117 ++++++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 1b01b803a8a..0cb8779aea9 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -323,6 +323,66 @@ func TestAccFSxONTAPVolume_size(t *testing.T) { }) } +func TestAccFSxONTAPVolume_snaplock(t *testing.T) { + ctx := acctest.Context(t) + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_snaplockCreate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "false"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "DISABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"skip_final_backup"}, + }, + { + Config: testAccONTAPVolumeConfig_snaplockUpdate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "14"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "ENABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "30"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "MONTHS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "9"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "HOURS"), + 
resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "24"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "true"), + ), + }, + }, + }) +} + func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume @@ -672,6 +732,63 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, size)) } +func testAccONTAPVolumeConfig_snaplockCreate(rName string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + + snaplock_configuration { + snaplock_type = "ENTERPRISE" + } +} +`, rName)) +} + +func testAccONTAPVolumeConfig_snaplockUpdate(rName string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + + snaplock_configuration { + audit_log_volume = true + privileged_delete = "ENABLED" + snaplock_type = "ENTERPRISE" + volume_append_mode_enabled = true + + autocommit_period { + type = "DAYS" + value = 14 + } + + retention_period { + default_retention { + type = "DAYS" + value = 30 + } + + maximum_retention { + type = "MONTHS" + value = 9 + } + + minimum_retention { + type = "HOURS" + value = 24 + } + } + } +} +`, rName)) +} + func testAccONTAPVolumeConfig_snapshotPolicy(rName, snapshotPolicy string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` 
resource "aws_fsx_ontap_volume" "test" { From 6dbbc637ad7c20fc3edb15a16da69f461376c955 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 24 Sep 2023 16:30:46 -0400 Subject: [PATCH 31/38] fsx: Fix sweeper compilation errors. --- internal/service/fsx/sweep.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 7fe2bd1681c..d088424cb05 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -291,7 +291,7 @@ func sweepONTAPVolumes(region string) error { continue } - r := ResourceOntapVolume() + r := ResourceONTAPVolume() d := r.Data(nil) d.SetId(aws.StringValue(v.VolumeId)) d.Set("skip_final_backup", true) @@ -393,7 +393,7 @@ func sweepOpenZFSVolume(region string) error { continue } - r := ResourceOpenzfsVolume() + r := ResourceOpenZFSVolume() d := r.Data(nil) d.SetId(aws.StringValue(v.VolumeId)) From 3c3cb183b3e0af60656b6e7e27aa88b39fc3ded3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 13:56:56 -0400 Subject: [PATCH 32/38] r/aws_fsx_ontap_volume: 'snaplock_configuration.autocommit_period' and 'snaplock_configuration.retention_period' are Optional+Computed. 
--- internal/service/fsx/ontap_volume.go | 41 ++++++++++++++--------- internal/service/fsx/ontap_volume_test.go | 21 +++++++++--- 2 files changed, 42 insertions(+), 20 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 66ac9371d57..691818016fd 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -114,14 +114,17 @@ func ResourceONTAPVolume() *schema.Resource { Default: false, }, "autocommit_period": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(fsx.AutocommitPeriodType_Values(), false), }, "value": { @@ -139,20 +142,24 @@ func ResourceONTAPVolume() *schema.Resource { ValidateFunc: validation.StringInSlice(fsx.PrivilegedDelete_Values(), false), }, "retention_period": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_retention": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), }, "value": { @@ -165,13 +172,15 @@ func ResourceONTAPVolume() *schema.Resource { }, "maximum_retention": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { 
Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), }, "value": { @@ -184,13 +193,15 @@ func ResourceONTAPVolume() *schema.Resource { }, "minimum_retention": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), }, "value": { @@ -205,7 +216,7 @@ func ResourceONTAPVolume() *schema.Resource { }, }, "snaplock_type": { - Type: schema.TypeInt, + Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice(fsx.SnaplockType_Values(), false), @@ -658,7 +669,7 @@ func flattenSnaplockConfiguration(apiObject *fsx.SnaplockConfiguration) map[stri } if v := apiObject.AutocommitPeriod; v != nil { - tfMap["nested_attribute_name"] = []interface{}{flattenAutocommitPeriod(v)} + tfMap["autocommit_period"] = []interface{}{flattenAutocommitPeriod(v)} } if v := apiObject.PrivilegedDelete; v != nil { diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 0cb8779aea9..2d4dbc08db5 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -341,9 +341,20 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) { testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "false"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"snaplock_configuration.0.autocommit_period.0.type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "0"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "DISABLED"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "30"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "0"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "false"), ), @@ -364,7 +375,7 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), 
resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "DAYS"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "14"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "ENABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "PERMANENTLY_DISABLED"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "DAYS"), @@ -752,14 +763,14 @@ func testAccONTAPVolumeConfig_snaplockUpdate(rName string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q - junction_path = "/%[1]s" + junction_path = "/snaplock_audit_log" size_in_megabytes = 1024 storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id snaplock_configuration { audit_log_volume = true - privileged_delete = "ENABLED" + privileged_delete = "PERMANENTLY_DISABLED" snaplock_type = "ENTERPRISE" volume_append_mode_enabled = true From 4f687c3cd4b6f35ca12d22f8188488ff1b44aa61 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 14:01:52 -0400 Subject: [PATCH 33/38] Fix markdown-lint 'MD009/no-trailing-spaces Trailing spaces [Expected: 0 or 2; Actual: 1]'. 
--- website/docs/r/fsx_ontap_volume.html.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 77b41b3ff94..c7784a285df 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -78,8 +78,7 @@ This resource supports the following arguments: ### SnapLock Retention Period -* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default -if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [Retention Period](#retention-period) below. +* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [Retention Period](#retention-period) below. * `maximum_retention` - (Required) The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. * `minimum_retention` - (Required) The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. From efbdb7aee027026b682e5b6206168f6369bc29f2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 15:22:46 -0400 Subject: [PATCH 34/38] r/aws_fsx_openzfs_volume: Add 'delete_volume_options' argument. 
--- .changelog/32530.txt | 4 +++ internal/service/fsx/openzfs_volume.go | 29 ++++++++++++++++--- internal/service/fsx/openzfs_volume_test.go | 15 ++++++++-- .../docs/r/fsx_openzfs_volume.html.markdown | 1 + 4 files changed, 43 insertions(+), 6 deletions(-) diff --git a/.changelog/32530.txt b/.changelog/32530.txt index 32a51c986a9..de3de23c7b2 100644 --- a/.changelog/32530.txt +++ b/.changelog/32530.txt @@ -5,3 +5,7 @@ resource/aws_fsx_ontap_volume: Add `copy_tags_to_backups` and `snapshot_policy` ```release-note:enhancement resource/aws_fsx_ontap_volume: Add `snaplock_configuration` configuration block to support [SnapLock](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snaplock.html) ``` + +```release-note:enhancement +resource/aws_fsx_openzfs_volume: Add `delete_volume_options` argument +``` \ No newline at end of file diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index 98b8865d84d..e71216f4390 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -35,7 +35,11 @@ func ResourceOpenZFSVolume() *schema.Resource { DeleteWithoutTimeout: resourceOpenZFSVolumeDelete, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("delete_volume_options", nil) + + return []*schema.ResourceData{d}, nil + }, }, Timeouts: &schema.ResourceTimeout{ @@ -60,6 +64,15 @@ func ResourceOpenZFSVolume() *schema.Resource { Default: "NONE", ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false), }, + "delete_volume_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(fsx.DeleteFileSystemOpenZFSOption_Values(), false), + }, + }, "name": { Type: schema.TypeString, Required: true, @@ -372,10 +385,18 @@ func 
resourceOpenZFSVolumeDelete(ctx context.Context, d *schema.ResourceData, me var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx for OpenZFS Volume: %s", d.Id()) - _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ + input := &fsx.DeleteVolumeInput{ VolumeId: aws.String(d.Id()), - }) + } + + if v, ok := d.GetOk("delete_volume_options"); ok && len(v.([]interface{})) > 0 { + input.OpenZFSConfiguration = &fsx.DeleteVolumeOpenZFSConfiguration{ + Options: flex.ExpandStringList(v.([]interface{})), + } + } + + log.Printf("[DEBUG] Deleting FSx for OpenZFS Volume: %s", d.Id()) + _, err := conn.DeleteVolumeWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { return diags diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index c7d882a9429..cce5ec5cf00 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -39,6 +39,7 @@ func TestAccFSxOpenZFSVolume_basic(t *testing.T) { acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "0"), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), @@ -181,23 +182,31 @@ func TestAccFSxOpenZFSVolume_copyTags(t *testing.T) { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "true"), Check: resource.ComposeTestCheckFunc( testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), + 
resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "delete_volume_options", + }, }, { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "false"), Check: resource.ComposeTestCheckFunc( testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), testAccCheckOpenZFSVolumeRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, }, @@ -642,6 +651,8 @@ resource "aws_fsx_openzfs_volume" "test" { tags = { %[2]q = %[3]q } + + delete_volume_options = ["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"] } `, rName, tagKey1, tagValue1, copyTags)) } diff --git a/website/docs/r/fsx_openzfs_volume.html.markdown b/website/docs/r/fsx_openzfs_volume.html.markdown index bb6b71adeb1..40b7d4def58 100644 --- a/website/docs/r/fsx_openzfs_volume.html.markdown +++ b/website/docs/r/fsx_openzfs_volume.html.markdown @@ -29,6 +29,7 @@ This resource supports the following arguments: * `origin_snapshot` - (Optional) The ARN of the source snapshot to create the volume from. 
* `copy_tags_to_snapshots` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. * `data_compression_type` - (Optional) Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume. +* `delete_volume_options` - (Optional) Whether to delete all child volumes and snapshots. Valid values: `DELETE_CHILD_VOLUMES_AND_SNAPSHOTS`. This configuration must be applied separately before attempting to delete the resource to have the desired behavior.. * `nfs_exports` - (Optional) NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below. * `read_only` - (Optional) specifies whether the volume is read-only. Default is false. * `record_size_kib` - (Optional) The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are `4`, `8`, `16`, `32`, `64`, `128`, `256`, `512`, or `1024` KiB. The default is `128` KiB. From ad585853d2618654051a3a095fc582f1d3ebc616 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 15:23:01 -0400 Subject: [PATCH 35/38] Acceptance test output: % ACCTEST_TIMEOUT=720m make testacc TESTARGS='-run=TestAccFSxOpenZFSVolume_basic\|TestAccFSxOpenZFSVolume_copyTags' PKG=fsx ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/fsx/... 
-v -count 1 -parallel 20 -run=TestAccFSxOpenZFSVolume_basic\|TestAccFSxOpenZFSVolume_copyTags -timeout 720m === RUN TestAccFSxOpenZFSVolume_basic === PAUSE TestAccFSxOpenZFSVolume_basic === RUN TestAccFSxOpenZFSVolume_copyTags === PAUSE TestAccFSxOpenZFSVolume_copyTags === CONT TestAccFSxOpenZFSVolume_basic === CONT TestAccFSxOpenZFSVolume_copyTags --- PASS: TestAccFSxOpenZFSVolume_basic (839.04s) --- PASS: TestAccFSxOpenZFSVolume_copyTags (960.33s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/fsx 965.817s From a0693f0afe43d040b8f4256f9718b183f22180e9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 16:51:06 -0400 Subject: [PATCH 36/38] r/aws_opensearch_outbound_connection: Cosmetics. --- .changelog/32990.txt | 2 +- .../service/opensearch/outbound_connection.go | 260 ++++++++++-------- ...ensearch_outbound_connection.html.markdown | 2 +- 3 files changed, 153 insertions(+), 111 deletions(-) diff --git a/.changelog/32990.txt b/.changelog/32990.txt index 2292aff4c49..221d331f512 100644 --- a/.changelog/32990.txt +++ b/.changelog/32990.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_opensearch_outbound_connection: Add `connection_properties`, `connection_mode` and `accept_connection` arguments to support specifying connection mode. 
+resource/aws_opensearch_outbound_connection: Add `connection_properties`, `connection_mode` and `accept_connection` arguments ``` diff --git a/internal/service/opensearch/outbound_connection.go b/internal/service/opensearch/outbound_connection.go index 7a4dc9065dc..5e0c4fe64ab 100644 --- a/internal/service/opensearch/outbound_connection.go +++ b/internal/service/opensearch/outbound_connection.go @@ -6,7 +6,6 @@ package opensearch import ( "context" "errors" - "fmt" "log" "time" @@ -18,14 +17,44 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKResource("aws_opensearch_outbound_connection") func ResourceOutboundConnection() *schema.Resource { + outboundConnectionDomainInfoSchema := func() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "owner_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + } + } + return &schema.Resource{ CreateWithoutTimeout: resourceOutboundConnectionCreate, ReadWithoutTimeout: resourceOutboundConnectionRead, DeleteWithoutTimeout: resourceOutboundConnectionDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -36,6 +65,12 @@ func ResourceOutboundConnection() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "accept_connection": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, "connection_alias": { Type: schema.TypeString, Required: true, @@ -76,18 +111,12 @@ func ResourceOutboundConnection() 
*schema.Resource { }, }, }, - "local_domain_info": outboundConnectionDomainInfoSchema(), - "remote_domain_info": outboundConnectionDomainInfoSchema(), "connection_status": { Type: schema.TypeString, Computed: true, }, - "accept_connection": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, + "local_domain_info": outboundConnectionDomainInfoSchema(), + "remote_domain_info": outboundConnectionDomainInfoSchema(), }, } } @@ -95,29 +124,25 @@ func ResourceOutboundConnection() *schema.Resource { func resourceOutboundConnectionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*conns.AWSClient).OpenSearchConn(ctx) - // Create the Outbound Connection - createOpts := &opensearchservice.CreateOutboundConnectionInput{ - ConnectionAlias: aws.String(d.Get("connection_alias").(string)), + connectionAlias := d.Get("connection_alias").(string) + input := &opensearchservice.CreateOutboundConnectionInput{ + ConnectionAlias: aws.String(connectionAlias), ConnectionMode: aws.String(d.Get("connection_mode").(string)), ConnectionProperties: expandOutboundConnectionConnectionProperties(d.Get("connection_properties").([]interface{})), LocalDomainInfo: expandOutboundConnectionDomainInfo(d.Get("local_domain_info").([]interface{})), RemoteDomainInfo: expandOutboundConnectionDomainInfo(d.Get("remote_domain_info").([]interface{})), } - log.Printf("[DEBUG] Outbound Connection Create options: %#v", createOpts) + output, err := conn.CreateOutboundConnectionWithContext(ctx, input) - resp, err := conn.CreateOutboundConnectionWithContext(ctx, createOpts) if err != nil { - return diag.Errorf("creating Outbound Connection: %s", err) + return diag.Errorf("creating OpenSearch Outbound Connection (%s): %s", connectionAlias, err) } - // Get the ID and store it - d.SetId(aws.StringValue(resp.ConnectionId)) - log.Printf("[INFO] Outbound Connection ID: %s", d.Id()) + d.SetId(aws.StringValue(output.ConnectionId)) - err = 
outboundConnectionWaitUntilAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) - if err != nil { - return diag.Errorf("waiting for Outbound Connection to become available: %s", err) + if _, err := waitOutboundConnectionCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return diag.Errorf("waiting for OpenSearch Outbound Connection (%s) create: %s", d.Id(), err) } if d.Get("accept_connection").(bool) { @@ -142,27 +167,24 @@ func resourceOutboundConnectionCreate(ctx context.Context, d *schema.ResourceDat func resourceOutboundConnectionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*conns.AWSClient).OpenSearchConn(ctx) - ccscRaw, statusCode, err := outboundConnectionRefreshState(ctx, conn, d.Id())() + connection, err := FindOutboundConnectionByID(ctx, conn, d.Id()) - if err != nil { - return diag.Errorf("reading Outbound Connection: %s", err) - } - - ccsc := ccscRaw.(*opensearchservice.OutboundConnection) - log.Printf("[DEBUG] Outbound Connection response: %#v", ccsc) - - if !d.IsNewResource() && statusCode == opensearchservice.OutboundConnectionStatusCodeDeleted { - log.Printf("[INFO] Outbound Connection (%s) deleted, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] OpenSearch Outbound Connection (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - d.Set("connection_alias", ccsc.ConnectionAlias) - d.Set("connection_mode", ccsc.ConnectionMode) - d.Set("connection_properties", flattenOutboundConnectionConnectionProperties(ccsc.ConnectionProperties)) - d.Set("remote_domain_info", flattenOutboundConnectionDomainInfo(ccsc.RemoteDomainInfo)) - d.Set("local_domain_info", flattenOutboundConnectionDomainInfo(ccsc.LocalDomainInfo)) - d.Set("connection_status", statusCode) + if err != nil { + return diag.Errorf("reading OpenSearch Outbound Connection (%s): %s", d.Id(), err) + } + + d.Set("connection_alias", 
connection.ConnectionAlias) + d.Set("connection_mode", connection.ConnectionMode) + d.Set("connection_properties", flattenOutboundConnectionConnectionProperties(connection.ConnectionProperties)) + d.Set("connection_status", connection.ConnectionStatus.StatusCode) + d.Set("remote_domain_info", flattenOutboundConnectionDomainInfo(connection.RemoteDomainInfo)) + d.Set("local_domain_info", flattenOutboundConnectionDomainInfo(connection.LocalDomainInfo)) return nil } @@ -180,61 +202,99 @@ func resourceOutboundConnectionDelete(ctx context.Context, d *schema.ResourceDat } if err != nil { - return diag.Errorf("deleting Outbound Connection (%s): %s", d.Id(), err) + return diag.Errorf("deleting OpenSearch Outbound Connection (%s): %s", d.Id(), err) } - if err := waitForOutboundConnectionDeletion(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return diag.Errorf("waiting for VPC Peering Connection (%s) to be deleted: %s", d.Id(), err) + if _, err := waitOutboundConnectionDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return diag.Errorf("waiting for OpenSearch Outbound Connection (%s) delete: %s", d.Id(), err) } return nil } -func outboundConnectionRefreshState(ctx context.Context, conn *opensearchservice.OpenSearchService, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeOutboundConnectionsWithContext(ctx, &opensearchservice.DescribeOutboundConnectionsInput{ - Filters: []*opensearchservice.Filter{ - { - Name: aws.String("connection-id"), - Values: []*string{aws.String(id)}, - }, +func FindOutboundConnectionByID(ctx context.Context, conn *opensearchservice.OpenSearchService, id string) (*opensearchservice.OutboundConnection, error) { + input := &opensearchservice.DescribeOutboundConnectionsInput{ + Filters: []*opensearchservice.Filter{ + { + Name: aws.String("connection-id"), + Values: aws.StringSlice([]string{id}), }, - }) - if err != nil { - return nil, "", err + },
+ } + + output, err := findOutboundConnection(ctx, conn, input) + + if err != nil { + return nil, err + } + + if output.ConnectionStatus == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if status := aws.StringValue(output.ConnectionStatus.StatusCode); status == opensearchservice.OutboundConnectionStatusCodeDeleted { + return nil, &retry.NotFoundError{ + Message: status, + LastRequest: input, } + } - if resp == nil || resp.Connections == nil || - len(resp.Connections) == 0 || resp.Connections[0] == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our connection yet. Return an empty state. - return nil, "", nil + return output, err +} + +func findOutboundConnection(ctx context.Context, conn *opensearchservice.OpenSearchService, input *opensearchservice.DescribeOutboundConnectionsInput) (*opensearchservice.OutboundConnection, error) { + output, err := findOutboundConnections(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findOutboundConnections(ctx context.Context, conn *opensearchservice.OpenSearchService, input *opensearchservice.DescribeOutboundConnectionsInput) ([]*opensearchservice.OutboundConnection, error) { + var output []*opensearchservice.OutboundConnection + + err := conn.DescribeOutboundConnectionsPagesWithContext(ctx, input, func(page *opensearchservice.DescribeOutboundConnectionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Connections { + if v != nil { + output = append(output, v) + } } - ccsc := resp.Connections[0] - if ccsc.ConnectionStatus == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our connection yet. Return an empty state. 
+ + return !lastPage + }) + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusOutboundConnection(ctx context.Context, conn *opensearchservice.OpenSearchService, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindOutboundConnectionByID(ctx, conn, id) + + if tfresource.NotFound(err) { return nil, "", nil } - statusCode := aws.StringValue(ccsc.ConnectionStatus.StatusCode) - // A Outbound Connection can exist in a failed state, - // thus we short circuit before the time out would occur. - if statusCode == opensearchservice.OutboundConnectionStatusCodeValidationFailed { - return nil, statusCode, errors.New(aws.StringValue(ccsc.ConnectionStatus.Message)) + if err != nil { + return nil, "", err } - return ccsc, statusCode, nil + return output, aws.StringValue(output.ConnectionStatus.StatusCode), nil } } -func outboundConnectionWaitUntilAvailable(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { - log.Printf("[DEBUG] Waiting for Outbound Connection (%s) to become available.", id) +func waitOutboundConnectionCreated(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) (*opensearchservice.OutboundConnection, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{ - opensearchservice.OutboundConnectionStatusCodeValidating, - opensearchservice.OutboundConnectionStatusCodeProvisioning, - }, + Pending: []string{opensearchservice.OutboundConnectionStatusCodeValidating, opensearchservice.OutboundConnectionStatusCodeProvisioning}, Target: []string{ opensearchservice.OutboundConnectionStatusCodePendingAcceptance, opensearchservice.OutboundConnectionStatusCodeActive, @@ -242,16 +302,22 @@ func outboundConnectionWaitUntilAvailable(ctx context.Context, conn *opensearchs opensearchservice.OutboundConnectionStatusCodeRejected, opensearchservice.OutboundConnectionStatusCodeValidationFailed, }, - 
Refresh: outboundConnectionRefreshState(ctx, conn, id), + Refresh: statusOutboundConnection(ctx, conn, id), Timeout: timeout, } - if _, err := stateConf.WaitForStateContext(ctx); err != nil { - return fmt.Errorf("waiting for Outbound Connection (%s) to become available: %s", id, err) + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*opensearchservice.OutboundConnection); ok { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.ConnectionStatus.Message))) + + return output, err } - return nil + + return nil, err } -func waitForOutboundConnectionDeletion(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) error { +func waitOutboundConnectionDeleted(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) (*opensearchservice.OutboundConnection, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ opensearchservice.OutboundConnectionStatusCodeActive, @@ -259,44 +325,20 @@ func waitForOutboundConnectionDeletion(ctx context.Context, conn *opensearchserv opensearchservice.OutboundConnectionStatusCodeDeleting, opensearchservice.OutboundConnectionStatusCodeRejecting, }, - Target: []string{ - opensearchservice.OutboundConnectionStatusCodeDeleted, - }, - Refresh: outboundConnectionRefreshState(ctx, conn, id), + Target: []string{}, + Refresh: statusOutboundConnection(ctx, conn, id), Timeout: timeout, } - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return err -} + if output, ok := outputRaw.(*opensearchservice.OutboundConnection); ok { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.ConnectionStatus.Message))) -func outboundConnectionDomainInfoSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "owner_id": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - "domain_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, + return output, err } + + return nil, err } func expandOutboundConnectionDomainInfo(vOptions []interface{}) *opensearchservice.DomainInformationContainer { diff --git a/website/docs/r/opensearch_outbound_connection.html.markdown b/website/docs/r/opensearch_outbound_connection.html.markdown index febe429ac8e..ded5205d4d4 100644 --- a/website/docs/r/opensearch_outbound_connection.html.markdown +++ b/website/docs/r/opensearch_outbound_connection.html.markdown @@ -42,7 +42,7 @@ This resource supports the following arguments: * `connection_alias` - (Required, Forces new resource) Specifies the connection alias that will be used by the customer for this connection. * `connection_mode` - (Required, Forces new resource) Specifies the connection mode. Accepted values are `DIRECT` or `VPC_ENDPOINT`. * `accept_connection` - (Optional, Forces new resource) Accepts the connection. -* `connection_properties` - (Optional, Forces new resource) Configuration block for the outbound connection.. +* `connection_properties` - (Optional, Forces new resource) Configuration block for the outbound connection. * `local_domain_info` - (Required, Forces new resource) Configuration block for the local Opensearch domain. * `remote_domain_info` - (Required, Forces new resource) Configuration block for the remote Opensearch domain. From 8361bb1065e76fe5bddfa64e8864ec5c27801394 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 18:28:41 -0400 Subject: [PATCH 37/38] Fix golangci-lint 'unparam'. 
--- internal/service/opensearch/inbound_connection_accepter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/opensearch/inbound_connection_accepter.go b/internal/service/opensearch/inbound_connection_accepter.go index adddae7063d..8eb6a21dc58 100644 --- a/internal/service/opensearch/inbound_connection_accepter.go +++ b/internal/service/opensearch/inbound_connection_accepter.go @@ -216,7 +216,7 @@ func statusInboundConnection(ctx context.Context, conn *opensearchservice.OpenSe } } -func waitInboundConnectionAccepted(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) (*opensearchservice.InboundConnection, error) { +func waitInboundConnectionAccepted(ctx context.Context, conn *opensearchservice.OpenSearchService, id string, timeout time.Duration) (*opensearchservice.InboundConnection, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{opensearchservice.InboundConnectionStatusCodeProvisioning, opensearchservice.InboundConnectionStatusCodeApproved}, Target: []string{opensearchservice.InboundConnectionStatusCodeActive}, From 0ed7cef3fbe4c5343a6791947b5bcd4cee036906 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Sep 2023 18:31:09 -0400 Subject: [PATCH 38/38] r/aws_fsx_ontap_volume: Add 'bypass_snaplock_enterprise_retention' argument. 
--- .changelog/32530.txt | 2 +- internal/service/fsx/ontap_volume.go | 9 +- internal/service/fsx/ontap_volume_test.go | 99 +++++++++++-------- website/docs/r/fsx_ontap_volume.html.markdown | 1 + 4 files changed, 66 insertions(+), 45 deletions(-) diff --git a/.changelog/32530.txt b/.changelog/32530.txt index de3de23c7b2..c092c4bd830 100644 --- a/.changelog/32530.txt +++ b/.changelog/32530.txt @@ -3,7 +3,7 @@ resource/aws_fsx_ontap_volume: Add `copy_tags_to_backups` and `snapshot_policy` ``` ```release-note:enhancement -resource/aws_fsx_ontap_volume: Add `snaplock_configuration` configuration block to support [SnapLock](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snaplock.html) +resource/aws_fsx_ontap_volume: Add `bypass_snaplock_enterprise_retention` argument and `snaplock_configuration` configuration block to support [SnapLock](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snaplock.html) ``` ```release-note:enhancement diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 691818016fd..6ef01fa1627 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -38,6 +38,7 @@ func ResourceONTAPVolume() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("bypass_snaplock_enterprise_retention", false) d.Set("skip_final_backup", false) return []*schema.ResourceData{d}, nil @@ -55,6 +56,11 @@ func ResourceONTAPVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bypass_snaplock_enterprise_retention": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "copy_tags_to_backups": { Type: schema.TypeBool, Optional: true, @@ -473,7 +479,8 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) _, err := conn.DeleteVolumeWithContext(ctx, 
&fsx.DeleteVolumeInput{ OntapConfiguration: &fsx.DeleteVolumeOntapConfiguration{ - SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), + BypassSnaplockEnterpriseRetention: aws.Bool(d.Get("bypass_snaplock_enterprise_retention").(bool)), + SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, VolumeId: aws.String(d.Id()), }) diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 2d4dbc08db5..02fdaa004a1 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -37,6 +37,7 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "false"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), resource.TestCheckResourceAttrSet(resourceName, "file_system_id"), resource.TestCheckResourceAttr(resourceName, "junction_path", fmt.Sprintf("/%[1]s", rName)), @@ -50,16 +51,15 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "storage_virtual_machine_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "tiering_policy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tiering_policy.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "uuid"), resource.TestCheckResourceAttr(resourceName, "volume_type", "ONTAP"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) 
@@ -112,7 +112,7 @@ func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, false), @@ -152,7 +152,7 @@ func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath2), @@ -191,7 +191,7 @@ func TestAccFSxONTAPVolume_name(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_basic(rName2), @@ -228,7 +228,7 @@ func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, }, }) @@ -258,7 +258,7 @@ func TestAccFSxONTAPVolume_securityStyle(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_securityStyle(rName, "NTFS"), @@ -308,7 +308,7 @@ func TestAccFSxONTAPVolume_size(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + 
ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_size(rName, size2), @@ -325,7 +325,7 @@ func TestAccFSxONTAPVolume_size(t *testing.T) { func TestAccFSxONTAPVolume_snaplock(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1 /*, volume2*/ fsx.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) @@ -339,6 +339,7 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) { Config: testAccONTAPVolumeConfig_snaplockCreate(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "true"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "false"), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), @@ -363,33 +364,39 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, - }, - { - Config: testAccONTAPVolumeConfig_snaplockUpdate(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), - testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "true"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "DAYS"), - resource.TestCheckResourceAttr(resourceName, 
"snaplock_configuration.0.autocommit_period.0.value", "14"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "PERMANENTLY_DISABLED"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "DAYS"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "30"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "MONTHS"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "9"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "HOURS"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "24"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), - resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "true"), - ), - }, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + }, + /* + See https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/how-snaplock-works.html#snaplock-audit-log-volume. + > The minimum retention period for a SnapLock audit log volume is six months. 
Until this retention period expires, the SnapLock audit log volume and the SVM and file system that are associated with it can't be deleted even if the volume was created in SnapLock Enterprise mode. + + { + Config: testAccONTAPVolumeConfig_snaplockUpdate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "14"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "PERMANENTLY_DISABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "30"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "MONTHS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "9"), 
+ resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "HOURS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "24"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "true"), + ), + }, + */ }, }) } @@ -420,7 +427,7 @@ func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy2), @@ -459,7 +466,7 @@ func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, false), @@ -498,7 +505,7 @@ func TestAccFSxONTAPVolume_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), @@ -547,7 +554,7 @@ func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: 
[]string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "SNAPSHOT_ONLY", 10), @@ -755,10 +762,13 @@ resource "aws_fsx_ontap_volume" "test" { snaplock_configuration { snaplock_type = "ENTERPRISE" } + + bypass_snaplock_enterprise_retention = true } `, rName)) } +/* func testAccONTAPVolumeConfig_snaplockUpdate(rName string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { @@ -796,9 +806,12 @@ resource "aws_fsx_ontap_volume" "test" { } } } + + bypass_snaplock_enterprise_retention = true } `, rName)) } +*/ func testAccONTAPVolumeConfig_snapshotPolicy(rName, snapshotPolicy string) string { return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index c7784a285df..ab734089c3a 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -49,6 +49,7 @@ resource "aws_fsx_ontap_volume" "test" { This resource supports the following arguments: * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. +* `bypass_snaplock_enterprise_retention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. * `junction_path` - (Optional) Specifies the location in the storage virtual machine's namespace where the volume is mounted. 
The junction_path must have a leading forward slash, such as `/vol3` * `ontap_volume_type` - (Optional) Specifies the type of volume, valid values are `RW`, `DP`. Default value is `RW`. These can be set by the ONTAP CLI or API. This setting is used as part of migration and replication [Migrating to Amazon FSx for NetApp ONTAP](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/migrating-fsx-ontap.html)