diff --git a/.changelog/32908.txt b/.changelog/32908.txt
new file mode 100644
index 00000000000..2efeea10795
--- /dev/null
+++ b/.changelog/32908.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+aws_dms_replication_config
+```
\ No newline at end of file
diff --git a/.changelog/33537.txt b/.changelog/33537.txt
new file mode 100644
index 00000000000..39ec840fa83
--- /dev/null
+++ b/.changelog/33537.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_s3_bucket_policy: Fix intermittent `couldn't find resource` errors on resource Create
+```
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff7e75e4ef1..7c3cdf1ef92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,13 @@
 ## 5.19.0 (Unreleased)
+
+FEATURES:
+
+* **New Resource:** `aws_dms_replication_config` ([#32908](https://github.com/hashicorp/terraform-provider-aws/issues/32908))
+
+BUG FIXES:
+
+* resource/aws_s3_bucket_policy: Fix intermittent `couldn't find resource` errors on resource Create ([#33537](https://github.com/hashicorp/terraform-provider-aws/issues/33537))
+
 ## 5.18.0 (September 21, 2023)
 
 FEATURES:
diff --git a/internal/service/dms/consts.go b/internal/service/dms/consts.go
index 4feea88f1c6..137f877637a 100644
--- a/internal/service/dms/consts.go
+++ b/internal/service/dms/consts.go
@@ -134,6 +134,28 @@ func encryptionMode_Values() []string {
 	}
 }
 
+const (
+	replicationStatusCreated              = "created"
+	replicationStatusReady                = "ready"
+	replicationStatusRunning              = "running"
+	replicationStatusStopping             = "stopping"
+	replicationStatusStopped              = "stopped"
+	replicationStatusFailed               = "failed"
+	replicationStatusInitialising         = "initializing"
+	replicationStatusMetadataResources    = "preparing_metadata_resources"
+	replicationStatusTestingConnection    = "testing_connection"
+	replicationStatusFetchingMetadata     = "fetching_metadata"
+	replicationStatusCalculatingCapacity  = "calculating_capacity"
+	replicationStatusProvisioningCapacity = "provisioning_capacity"
+	replicationStatusReplicationStarting  = "replication_starting"
+)
+
+// Valid values for the StartReplicationType field of dms.StartReplicationInput.
+const (
+	replicationTypeValueStartReplication = "start-replication"
+	replicationTypeValueResumeProcessing = "resume-processing"
+)
+
 const (
 	networkTypeDual = "DUAL"
 	networkTypeIPv4 = "IPV4"
diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go
new file mode 100644
index 00000000000..0e28a878388
--- /dev/null
+++ b/internal/service/dms/replication_config.go
@@ -0,0 +1,658 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package dms + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_dms_replication_config", name="Replication Config") +// @Tags(identifierAttribute="id") +func ResourceReplicationConfig() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceReplicationConfigCreate, + ReadWithoutTimeout: resourceReplicationConfigRead, + UpdateWithoutTimeout: resourceReplicationConfigUpdate, + DeleteWithoutTimeout: resourceReplicationConfigDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "compute_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "dns_name_servers": { + Type: schema.TypeString, + Optional: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "max_capacity_units": { + Type: schema.TypeInt, + Optional: true, + }, + "min_capacity_units": { + Type: schema.TypeInt, + Optional: true, + }, + "multi_az": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "preferred_maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidOnceAWeekWindowFormat, + }, + "replication_subnet_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validReplicationSubnetGroupID, + }, + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "replication_config_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "replication_settings": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "replication_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), + }, + "resource_identifier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "source_endpoint_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "start_replication": { + Type: 
schema.TypeBool, + Optional: true, + Default: false, + }, + "supplemental_settings": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + }, + "table_mappings": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "target_endpoint_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + + CustomizeDiff: verify.SetTagsDiff, + } +} + +func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DMSConn(ctx) + + replicationConfigID := d.Get("replication_config_identifier").(string) + input := &dms.CreateReplicationConfigInput{ + ReplicationConfigIdentifier: aws.String(replicationConfigID), + ReplicationType: aws.String(d.Get("replication_type").(string)), + SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), + TableMappings: aws.String(d.Get("table_mappings").(string)), + Tags: getTagsIn(ctx), + TargetEndpointArn: aws.String(d.Get("target_endpoint_arn").(string)), + } + + if v, ok := d.GetOk("compute_config"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ComputeConfig = expandComputeConfigInput(v.([]interface{})[0].(map[string]interface{})) + } + + if v, ok := d.GetOk("replication_settings"); ok { + input.ReplicationSettings = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_identifier"); ok { + input.ResourceIdentifier = aws.String(v.(string)) + } + + if v, ok := d.GetOk("supplemental_settings"); ok { + input.SupplementalSettings = aws.String(v.(string)) + } + + output, err := conn.CreateReplicationConfigWithContext(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating DMS Replication Config (%s): %s", replicationConfigID, err) + } + + d.SetId(aws.StringValue(output.ReplicationConfig.ReplicationConfigArn)) + + if d.Get("start_replication").(bool) { + if err := startReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + + return resourceReplicationConfigRead(ctx, d, meta) +} + +func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DMSConn(ctx) + + replicationConfig, err := FindReplicationConfigByARN(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] DMS Replication Config (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading DMS Replication Config (%s): %s", d.Id(), err) + } + + d.Set("arn", replicationConfig.ReplicationConfigArn) + if err := d.Set("compute_config", flattenComputeConfig(replicationConfig.ComputeConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) + } + d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) + d.Set("replication_settings", replicationConfig.ReplicationSettings) + d.Set("replication_type", replicationConfig.ReplicationType) + d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) + d.Set("supplemental_settings", 
replicationConfig.SupplementalSettings) + d.Set("table_mappings", replicationConfig.TableMappings) + d.Set("target_endpoint_arn", replicationConfig.TargetEndpointArn) + + return diags +} + +func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DMSConn(ctx) + + if d.HasChangesExcept("tags", "tags_all", "start_replication") { + if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + input := &dms.ModifyReplicationConfigInput{ + ReplicationConfigArn: aws.String(d.Id()), + } + + if d.HasChange("compute_config") { + if v, ok := d.GetOk("compute_config"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ComputeConfig = expandComputeConfigInput(v.([]interface{})[0].(map[string]interface{})) + } + } + + if d.HasChange("replication_settings") { + input.ReplicationSettings = aws.String(d.Get("replication_settings").(string)) + } + + if d.HasChange("replication_type") { + input.ReplicationType = aws.String(d.Get("replication_type").(string)) + } + + if d.HasChange("source_endpoint_arn") { + input.SourceEndpointArn = aws.String(d.Get("source_endpoint_arn").(string)) + } + + if d.HasChange("supplemental_settings") { + input.SupplementalSettings = aws.String(d.Get("supplemental_settings").(string)) + } + + if d.HasChange("table_mappings") { + input.TableMappings = aws.String(d.Get("table_mappings").(string)) + } + + if d.HasChange("target_endpoint_arn") { + input.TargetEndpointArn = aws.String(d.Get("target_endpoint_arn").(string)) + } + + _, err := conn.ModifyReplicationConfigWithContext(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating DMS Replication Config (%s): %s", d.Id(), err) + } + + if d.Get("start_replication").(bool) { + if err := startReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + } + + if d.HasChange("start_replication") { + var f func(context.Context, *dms.DatabaseMigrationService, string, time.Duration) error + if d.Get("start_replication").(bool) { + f = startReplication + } else { + f = stopReplication + } + if err := f(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + + return append(diags, resourceReplicationConfigRead(ctx, d, meta)...) 
+} + +func resourceReplicationConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DMSConn(ctx) + + if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + log.Printf("[DEBUG] Deleting DMS Replication Config: %s", d.Id()) + _, err := conn.DeleteReplicationConfigWithContext(ctx, &dms.DeleteReplicationConfigInput{ + ReplicationConfigArn: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting DMS Replication Config (%s): %s", d.Id(), err) + } + + if _, err := waitReplicationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Config (%s) delete: %s", d.Id(), err) + } + + return diags +} + +func FindReplicationConfigByARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.ReplicationConfig, error) { + input := &dms.DescribeReplicationConfigsInput{ + Filters: []*dms.Filter{{ + Name: aws.String("replication-config-arn"), + Values: aws.StringSlice([]string{arn}), + }}, + } + + return findReplicationConfig(ctx, conn, input) +} + +func findReplicationConfig(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) (*dms.ReplicationConfig, error) { + output, err := findReplicationConfigs(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findReplicationConfigs(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) ([]*dms.ReplicationConfig, error) { + var output []*dms.ReplicationConfig + + err := conn.DescribeReplicationConfigsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationConfigsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.ReplicationConfigs { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func findReplicationByReplicationConfigARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.Replication, error) { + input := &dms.DescribeReplicationsInput{ + Filters: []*dms.Filter{{ + Name: aws.String("replication-config-arn"), + Values: aws.StringSlice([]string{arn}), + }}, + } + + return findReplication(ctx, conn, input) +} + +func findReplication(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) (*dms.Replication, error) { + output, err := findReplications(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findReplications(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) ([]*dms.Replication, error) { + var output []*dms.Replication + + err := conn.DescribeReplicationsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Replications { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + 
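+	// Surface ResourceNotFoundFault from the paginator as a retry.NotFoundError so callers can detect it with tfresource.NotFound.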
+ if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findReplicationByReplicationConfigARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + +func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + replicationStatusReady, + replicationStatusInitialising, + replicationStatusMetadataResources, + replicationStatusTestingConnection, + replicationStatusFetchingMetadata, + replicationStatusCalculatingCapacity, + replicationStatusProvisioningCapacity, + replicationStatusReplicationStarting, + }, + Target: []string{replicationStatusRunning, replicationStatusStopped}, + Refresh: statusReplication(ctx, conn, arn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.Replication); ok { + return output, err + } + + return nil, err +} + +func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{replicationStatusStopping, replicationStatusRunning}, + Target: []string{replicationStatusStopped}, + Refresh: statusReplication(ctx, conn, arn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 60 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.Replication); ok { + return output, err + } + + return nil, err +} + +func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{replicationTaskStatusDeleting, replicationStatusStopped}, + Target: []string{}, + Refresh: statusReplication(ctx, conn, arn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.Replication); ok { + return output, err + } + + return nil, err +} + +func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { + replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) + + if err != nil { + return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) + } + + replicationStatus := aws.StringValue(replication.Status) + + if replicationStatus == replicationStatusRunning { + return nil + } + + startReplicationType := replicationTypeValueStartReplication + if replicationStatus != replicationStatusReady { + startReplicationType = replicationTypeValueResumeProcessing + } + input := &dms.StartReplicationInput{ + ReplicationConfigArn: aws.String(arn), + StartReplicationType: aws.String(startReplicationType), + } + + _, err = conn.StartReplicationWithContext(ctx, input) + + if err != nil { + return fmt.Errorf("starting 
DMS Serverless Replication (%s): %w", arn, err) + } + + if _, err := waitReplicationRunning(ctx, conn, arn, timeout); err != nil { + return fmt.Errorf("waiting for DMS Serverless Replication (%s) start: %w", arn, err) + } + + return nil +} + +func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { + replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) + + if err != nil { + return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) + } + + replicationStatus := aws.StringValue(replication.Status) + + if replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { + return nil + } + + input := &dms.StopReplicationInput{ + ReplicationConfigArn: aws.String(arn), + } + + _, err = conn.StopReplicationWithContext(ctx, input) + + if err != nil { + return fmt.Errorf("stopping DMS Serverless Replication (%s): %w", arn, err) + } + + if _, err := waitReplicationStopped(ctx, conn, arn, timeout); err != nil { + return fmt.Errorf("waiting for DMS Serverless Replication (%s) stop: %w", arn, err) + } + + return nil +} + +func flattenComputeConfig(apiObject *dms.ComputeConfig) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "availability_zone": aws.StringValue(apiObject.AvailabilityZone), + "dns_name_servers": aws.StringValue(apiObject.DnsNameServers), + "kms_key_id": aws.StringValue(apiObject.KmsKeyId), + "max_capacity_units": aws.Int64Value(apiObject.MaxCapacityUnits), + "min_capacity_units": aws.Int64Value(apiObject.MinCapacityUnits), + "multi_az": aws.BoolValue(apiObject.MultiAZ), + "preferred_maintenance_window": aws.StringValue(apiObject.PreferredMaintenanceWindow), + "replication_subnet_group_id": aws.StringValue(apiObject.ReplicationSubnetGroupId), + "vpc_security_group_ids": flex.FlattenStringSet(apiObject.VpcSecurityGroupIds), + } + + return []interface{}{tfMap} +} + +func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { + if tfMap == nil { + return nil + } + + apiObject := &dms.ComputeConfig{} + + if v, ok := tfMap["availability_zone"].(string); ok && v != "" { + apiObject.AvailabilityZone = aws.String(v) + } + + if v, ok := tfMap["dns_name_servers"].(string); ok && v != "" { + apiObject.DnsNameServers = aws.String(v) + } + + if v, ok := tfMap["kms_key_id"].(string); ok && v != "" { + apiObject.KmsKeyId = aws.String(v) + } + + if v, ok := tfMap["max_capacity_units"].(int); ok && v != 0 { + apiObject.MaxCapacityUnits = aws.Int64(int64(v)) + } + + if v, ok := tfMap["min_capacity_units"].(int); ok && v != 0 { + apiObject.MinCapacityUnits = aws.Int64(int64(v)) + } + + if v, ok := tfMap["multi_az"].(bool); ok { + apiObject.MultiAZ = aws.Bool(v) + } + + if v, ok := tfMap["preferred_maintenance_window"].(string); ok && v != "" { + apiObject.PreferredMaintenanceWindow = aws.String(v) + } + + if v, ok := tfMap["replication_subnet_group_id"].(string); ok && v != "" { + apiObject.ReplicationSubnetGroupId = aws.String(v) + } + + if v, ok := tfMap["vpc_security_group_ids"].(*schema.Set); ok && v.Len() > 0 { + apiObject.VpcSecurityGroupIds = flex.ExpandStringSet(v) + } + + return apiObject +} diff --git a/internal/service/dms/replication_config_test.go b/internal/service/dms/replication_config_test.go new file mode 100644 index 00000000000..33a25671334 --- /dev/null +++ b/internal/service/dms/replication_config_test.go @@ -0,0 +1,477 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dms_test + +import ( + "context" + "fmt" + "testing" + + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccDMSReplicationConfig_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.availability_zone", ""), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.dns_name_servers", ""), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.max_capacity_units", "128"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.min_capacity_units", "2"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.multi_az", "false"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.preferred_maintenance_window", "sun:23:45-mon:00:30"), + resource.TestCheckResourceAttrSet(resourceName, "compute_config.0.replication_subnet_group_id"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.vpc_security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "replication_config_identifier", rName), + resource.TestCheckResourceAttrSet(resourceName, "replication_settings"), + resource.TestCheckResourceAttr(resourceName, "replication_type", "cdc"), + resource.TestCheckNoResourceAttr(resourceName, "resource_identifier"), + resource.TestCheckResourceAttrSet(resourceName, "source_endpoint_arn"), + resource.TestCheckResourceAttr(resourceName, "start_replication", "false"), + resource.TestCheckResourceAttr(resourceName, "supplemental_settings", ""), + resource.TestCheckResourceAttrSet(resourceName, "table_mappings"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "target_endpoint_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"start_replication", "resource_identifier"}, + }, + }, + }) +} + +func TestAccDMSReplicationConfig_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, 
t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfdms.ResourceReplicationConfig(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccDMSReplicationConfig_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + Config: testAccReplicationConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccReplicationConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccDMSReplicationConfig_update(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfig_update(rName, "cdc", 2, 16), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_type", "cdc"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.max_capacity_units", "16"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.min_capacity_units", "2"), + ), + }, + { + Config: testAccReplicationConfig_update(rName, "cdc", 4, 32), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_type", "cdc"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.max_capacity_units", "32"), + resource.TestCheckResourceAttr(resourceName, 
"compute_config.0.min_capacity_units", "4"), + ), + }, + }, + }) +} + +func TestAccDMSReplicationConfig_startReplication(t *testing.T) { + ctx := acctest.Context(t) + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfig_startReplication(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "start_replication", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"start_replication", "resource_identifier"}, + }, + { + Config: testAccReplicationConfig_startReplication(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "start_replication", "false"), + ), + }, + }, + }) +} + +func testAccCheckReplicationConfigExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + + _, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) + + return err + } +} + +func testAccCheckReplicationConfigDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_dms_replication_config" { + continue + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + + _, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("DMS Replication Config %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccReplicationConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` +resource "aws_dms_replication_subnet_group" "test" { + replication_subnet_group_id = %[1]q + replication_subnet_group_description = "terraform test" + subnet_ids = aws_subnet.test[*].id +} + +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id +} + +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = %[1]q + } +} + +resource "aws_rds_cluster_parameter_group" "test" { + name = "%[1]s-pg-cluster" + family = "aurora-mysql5.7" + description = "DMS cluster parameter group" + + parameter { + name = "binlog_format" + value = "ROW" + apply_method = "pending-reboot" + } + + parameter { + name = "binlog_row_image" + value = "Full" + apply_method = "pending-reboot" + } + + parameter { + name = "binlog_checksum" + value = "NONE" + apply_method = "pending-reboot" + } +} + +resource "aws_rds_cluster" "test1" { + cluster_identifier = "%[1]s-aurora-cluster-source" + engine = 
"aurora-mysql" + engine_version = "5.7.mysql_aurora.2.11.2" + database_name = "tftest" + master_username = "tftest" + master_password = "mustbeeightcharaters" + skip_final_snapshot = true + vpc_security_group_ids = [aws_security_group.test.id] + db_subnet_group_name = aws_db_subnet_group.test.name + db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name +} + +resource "aws_rds_cluster_instance" "test1" { + identifier = "%[1]s-test1-primary" + cluster_identifier = aws_rds_cluster.test1.id + instance_class = "db.t2.small" + engine = aws_rds_cluster.test1.engine + engine_version = aws_rds_cluster.test1.engine_version + db_subnet_group_name = aws_db_subnet_group.test.name +} + +resource "aws_rds_cluster" "test2" { + cluster_identifier = "%[1]s-aurora-cluster-target" + engine = "aurora-mysql" + engine_version = "5.7.mysql_aurora.2.11.2" + database_name = "tftest" + master_username = "tftest" + master_password = "mustbeeightcharaters" + skip_final_snapshot = true + vpc_security_group_ids = [aws_security_group.test.id] + db_subnet_group_name = aws_db_subnet_group.test.name +} + +resource "aws_rds_cluster_instance" "test2" { + identifier = "%[1]s-test2-primary" + cluster_identifier = aws_rds_cluster.test2.id + instance_class = "db.t2.small" + engine = aws_rds_cluster.test2.engine + engine_version = aws_rds_cluster.test2.engine_version + db_subnet_group_name = aws_db_subnet_group.test.name +} + +resource "aws_dms_endpoint" "target" { + database_name = "tftest" + endpoint_id = "%[1]s-target" + endpoint_type = "target" + engine_name = "aurora" + server_name = aws_rds_cluster.test2.endpoint + port = 3306 + username = "tftest" + password = "mustbeeightcharaters" +} + +resource "aws_dms_endpoint" "source" { + database_name = "tftest" + endpoint_id = "%[1]s-source" + endpoint_type = "source" + engine_name = "aurora" + server_name = aws_rds_cluster.test1.endpoint + port = 3306 + username = "tftest" + password = "mustbeeightcharaters" +} +`, rName)) +} + +func testAccReplicationConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_config" "test" { + replication_config_identifier = %[1]q + replication_type = "cdc" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + compute_config { + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id + max_capacity_units = "128" + min_capacity_units = "2" + preferred_maintenance_window = "sun:23:45-mon:00:30" + } +} +`, rName)) +} + +func testAccReplicationConfig_update(rName, replicationType string, minCapacity, maxCapacity int) string { + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_config" "test" { + replication_config_identifier = %[1]q + resource_identifier = %[1]q + replication_type = %[2]q + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + compute_config { + replication_subnet_group_id = 
aws_dms_replication_subnet_group.test.replication_subnet_group_id + max_capacity_units = "%[3]d" + min_capacity_units = "%[4]d" + preferred_maintenance_window = "sun:23:45-mon:00:30" + } +} +`, rName, replicationType, maxCapacity, minCapacity)) +} + +func testAccReplicationConfig_startReplication(rName string, start bool) string { + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_config" "test" { + replication_config_identifier = %[1]q + resource_identifier = %[1]q + replication_type = "cdc" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + start_replication = %[2]t + + compute_config { + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id + max_capacity_units = "128" + min_capacity_units = "2" + preferred_maintenance_window = "sun:23:45-mon:00:30" + } +} +`, rName, start)) +} + +func testAccReplicationConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_config" "test" { + replication_config_identifier = %[1]q + replication_type = "cdc" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + compute_config { + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id + max_capacity_units = "128" + min_capacity_units = "2" + preferred_maintenance_window = "sun:23:45-mon:00:30" + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccReplicationConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_config" "test" { + replication_config_identifier = %[1]q + replication_type = "cdc" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + compute_config { + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id + max_capacity_units = "128" + min_capacity_units = "2" + preferred_maintenance_window = "sun:23:45-mon:00:30" + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/dms/service_package_gen.go b/internal/service/dms/service_package_gen.go index 3df3cff8dea..d48ac2af062 100644 --- a/internal/service/dms/service_package_gen.go +++ b/internal/service/dms/service_package_gen.go @@ -74,6 +74,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceReplicationConfig, + TypeName: "aws_dms_replication_config", + Name: "Replication Config", + Tags: 
&types.ServicePackageResourceTags{ + IdentifierAttribute: "id", + }, + }, { Factory: ResourceReplicationInstance, TypeName: "aws_dms_replication_instance", diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 183eab92323..a0f0dc5e045 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -875,7 +875,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, ErrCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { + if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { return sdkdiag.AppendErrorf(diags, "getting S3 Bucket CORS configuration: %s", err) } @@ -906,7 +906,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err != nil && !tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, - ErrCodeNoSuchWebsiteConfiguration, + errCodeNoSuchWebsiteConfiguration, errCodeXNotImplemented, ) { return sdkdiag.AppendErrorf(diags, "getting S3 Bucket website configuration: %s", err) diff --git a/internal/service/s3/bucket_accelerate_configuration.go b/internal/service/s3/bucket_accelerate_configuration.go index 91b3c8cd039..dc5230d1c98 100644 --- a/internal/service/s3/bucket_accelerate_configuration.go +++ b/internal/service/s3/bucket_accelerate_configuration.go @@ -80,6 +80,14 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. d.SetId(CreateResourceID(bucket, expectedBucketOwner)) + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketAccelerateConfiguration(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Accelerate Configuration (%s) create: %s", d.Id(), err) + } + return resourceBucketAccelerateConfigurationRead(ctx, d, meta) } @@ -165,6 +173,8 @@ func resourceBucketAccelerateConfigurationDelete(ctx context.Context, d *schema. return diag.Errorf("deleting S3 Bucket Accelerate Configuration (%s): %s", d.Id(), err) } + // Don't wait for the accelerate configuration to disappear as it still exists after suspension. 
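+	// (GetBucketAccelerateConfiguration keeps returning the configuration, with status "Suspended", once acceleration is suspended.)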
+ return nil } diff --git a/internal/service/s3/bucket_acl.go b/internal/service/s3/bucket_acl.go index 20083b1f5a2..550ee07f8bf 100644 --- a/internal/service/s3/bucket_acl.go +++ b/internal/service/s3/bucket_acl.go @@ -8,16 +8,18 @@ import ( "fmt" "log" "strings" - "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -30,7 +32,8 @@ func ResourceBucketACL() *schema.Resource { CreateWithoutTimeout: resourceBucketACLCreate, ReadWithoutTimeout: resourceBucketACLRead, UpdateWithoutTimeout: resourceBucketACLUpdate, - DeleteWithoutTimeout: resourceBucketACLDelete, + DeleteWithoutTimeout: schema.NoopContext, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -68,9 +71,9 @@ func ResourceBucketACL() *schema.Resource { Optional: true, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.Type_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.Type](), }, "uri": { Type: schema.TypeString, @@ -80,9 +83,9 @@ func ResourceBucketACL() *schema.Resource { }, }, "permission": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.Permission_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.Permission](), }, }, }, @@ -109,10 +112,10 @@ func ResourceBucketACL() *schema.Resource { }, }, "acl": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"access_control_policy"}, - ValidateFunc: validation.StringInSlice(BucketCannedACL_Values(), false), + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"access_control_policy"}, + ValidateDiagFunc: enum.Validate[types.BucketCannedACL](), }, "bucket": { Type: schema.TypeString, @@ -131,20 +134,17 @@ func ResourceBucketACL() *schema.Resource { } func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) expectedBucketOwner := d.Get("expected_bucket_owner").(string) acl := d.Get("acl").(string) - input := &s3.PutBucketAclInput{ Bucket: aws.String(bucket), } - if acl != "" { - input.ACL = aws.String(acl) + input.ACL = types.BucketCannedACL(acl) } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } @@ -153,63 +153,59 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta i input.AccessControlPolicy = expandBucketACLAccessControlPolicy(v.([]interface{})) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return 
conn.PutBucketAclWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketAcl(ctx, input) + }, errCodeNoSuchBucket) if err != nil { - return diag.Errorf("creating S3 bucket ACL for %s: %s", bucket, err) + return diag.Errorf("creating S3 Bucket (%s) ACL: %s", bucket, err) } d.SetId(BucketACLCreateResourceID(bucket, expectedBucketOwner, acl)) + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketACL(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket ACL (%s) create: %s", d.Id(), err) + } + return resourceBucketACLRead(ctx, d, meta) } func resourceBucketACLRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, acl, err := BucketACLParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - input := &s3.GetBucketAclInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketAclWithContext(ctx, input) + output, err := findBucketACL(ctx, conn, bucket, expectedBucketOwner) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket ACL (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return diag.Errorf("getting S3 bucket ACL (%s): %s", d.Id(), err) + return diag.Errorf("reading S3 Bucket ACL (%s): %s", d.Id(), err) } - if output == nil { - return diag.Errorf("getting S3 bucket ACL (%s): empty output", d.Id()) + if err := d.Set("access_control_policy", flattenBucketACLAccessControlPolicy(output)); err != nil { + return diag.Errorf("setting access_control_policy: %s", err) } - d.Set("acl", acl) d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("access_control_policy", flattenBucketACLAccessControlPolicy(output)); err != nil { - return diag.Errorf("setting access_control_policy: %s", err) - } return nil } func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, acl, err := BucketACLParseResourceID(d.Id()) if err != nil { @@ -219,7 +215,6 @@ func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &s3.PutBucketAclInput{ Bucket: aws.String(bucket), } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } @@ -230,10 +225,10 @@ func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta i if d.HasChange("acl") { acl = d.Get("acl").(string) - input.ACL = aws.String(acl) + input.ACL = types.BucketCannedACL(acl) } - _, err = conn.PutBucketAclWithContext(ctx, input) + _, err = conn.PutBucketAcl(ctx, input) if err != nil { return diag.Errorf("updating S3 bucket ACL (%s): %s", d.Id(), err) @@ -247,12 +242,7 @@ func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta i return resourceBucketACLRead(ctx, d, meta) } -func resourceBucketACLDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { - log.Printf("[WARN] Cannot destroy S3 Bucket ACL. Terraform will remove this resource from the state file, however resources may remain.") - return nil -} - -func expandBucketACLAccessControlPolicy(l []interface{}) *s3.AccessControlPolicy { +func expandBucketACLAccessControlPolicy(l []interface{}) *types.AccessControlPolicy { if len(l) == 0 || l[0] == nil { return nil } @@ -262,7 +252,7 @@ func expandBucketACLAccessControlPolicy(l []interface{}) *s3.AccessControlPolicy return nil } - result := &s3.AccessControlPolicy{} + result := &types.AccessControlPolicy{} if v, ok := tfMap["grant"].(*schema.Set); ok && v.Len() > 0 { result.Grants = expandBucketACLAccessControlPolicyGrants(v.List()) @@ -275,8 +265,8 @@ func expandBucketACLAccessControlPolicy(l []interface{}) *s3.AccessControlPolicy return result } -func expandBucketACLAccessControlPolicyGrants(l []interface{}) []*s3.Grant { - var grants []*s3.Grant +func expandBucketACLAccessControlPolicyGrants(l []interface{}) []types.Grant { + var grants []types.Grant for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -284,14 +274,14 @@ func expandBucketACLAccessControlPolicyGrants(l []interface{}) []*s3.Grant { continue } - grant := &s3.Grant{} + grant := types.Grant{} if v, ok := tfMap["grantee"].([]interface{}); ok && len(v) > 0 && v[0] != nil { grant.Grantee = expandBucketACLAccessControlPolicyGrantsGrantee(v) } if v, ok := tfMap["permission"].(string); ok && v != "" { - grant.Permission = aws.String(v) + grant.Permission = types.Permission(v) } grants = append(grants, grant) @@ -300,7 +290,7 @@ func expandBucketACLAccessControlPolicyGrants(l []interface{}) []*s3.Grant { return grants } -func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *s3.Grantee { +func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *types.Grantee { if len(l) == 0 || l[0] == nil { return nil } @@ -310,7 +300,7 @@ func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *s3.Grante return nil } - result := &s3.Grantee{} + result := &types.Grantee{} if v, ok := tfMap["email_address"].(string); ok && v != "" { result.EmailAddress = aws.String(v) @@ -321,7 +311,7 @@ func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *s3.Grante } if v, ok := tfMap["type"].(string); ok && v != "" { - result.Type = aws.String(v) + result.Type = types.Type(v) } if v, ok := tfMap["uri"].(string); ok && v != "" { @@ -331,7 +321,7 @@ func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *s3.Grante return result } -func expandBucketACLAccessControlPolicyOwner(l []interface{}) *s3.Owner { +func expandBucketACLAccessControlPolicyOwner(l []interface{}) *types.Owner { if len(l) == 0 || l[0] == nil { return nil } @@ -341,7 +331,7 @@ func expandBucketACLAccessControlPolicyOwner(l []interface{}) *s3.Owner { return nil } - owner := &s3.Owner{} + owner := &types.Owner{} if v, ok := tfMap["display_name"].(string); ok && v != "" { owner.DisplayName = aws.String(v) @@ -372,61 +362,53 @@ func flattenBucketACLAccessControlPolicy(output *s3.GetBucketAclOutput) []interf return []interface{}{m} } -func flattenBucketACLAccessControlPolicyGrants(grants []*s3.Grant) []interface{} { +func flattenBucketACLAccessControlPolicyGrants(grants []types.Grant) []interface{} { var results []interface{} for _, grant := range grants { - if grant == nil { - continue + m := map[string]interface{}{ + "permission": grant.Permission, } - m := make(map[string]interface{}) - if grant.Grantee != nil { m["grantee"] = 
flattenBucketACLAccessControlPolicyGrantsGrantee(grant.Grantee) } - if grant.Permission != nil { - m["permission"] = aws.StringValue(grant.Permission) - } - results = append(results, m) } return results } -func flattenBucketACLAccessControlPolicyGrantsGrantee(grantee *s3.Grantee) []interface{} { +func flattenBucketACLAccessControlPolicyGrantsGrantee(grantee *types.Grantee) []interface{} { if grantee == nil { return []interface{}{} } - m := make(map[string]interface{}) + m := map[string]interface{}{ + "type": grantee.Type, + } if grantee.DisplayName != nil { - m["display_name"] = aws.StringValue(grantee.DisplayName) + m["display_name"] = aws.ToString(grantee.DisplayName) } if grantee.EmailAddress != nil { - m["email_address"] = aws.StringValue(grantee.EmailAddress) + m["email_address"] = aws.ToString(grantee.EmailAddress) } if grantee.ID != nil { - m["id"] = aws.StringValue(grantee.ID) - } - - if grantee.Type != nil { - m["type"] = aws.StringValue(grantee.Type) + m["id"] = aws.ToString(grantee.ID) } if grantee.URI != nil { - m["uri"] = aws.StringValue(grantee.URI) + m["uri"] = aws.ToString(grantee.URI) } return []interface{}{m} } -func flattenBucketACLAccessControlPolicyOwner(owner *s3.Owner) []interface{} { +func flattenBucketACLAccessControlPolicyOwner(owner *types.Owner) []interface{} { if owner == nil { return []interface{}{} } @@ -434,11 +416,11 @@ func flattenBucketACLAccessControlPolicyOwner(owner *s3.Owner) []interface{} { m := make(map[string]interface{}) if owner.DisplayName != nil { - m["display_name"] = aws.StringValue(owner.DisplayName) + m["display_name"] = aws.ToString(owner.DisplayName) } if owner.ID != nil { - m["id"] = aws.StringValue(owner.ID) + m["id"] = aws.ToString(owner.ID) } return []interface{}{m} @@ -513,3 +495,31 @@ func BucketACLParseResourceID(id string) (string, string, string, error) { return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET or BUCKET%[2]sEXPECTED_BUCKET_OWNER or BUCKET%[2]sACL "+ "or BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, BucketACLSeparator) } + +func findBucketACL(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketAclOutput, error) { + input := &s3.GetBucketAclInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketAcl(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index 10596b95e4f..b6fae42eeab 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/names" ) func 
TestBucketACLParseResourceID(t *testing.T) { @@ -263,9 +264,9 @@ func TestAccS3BucketACL_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), @@ -297,9 +298,9 @@ func TestAccS3BucketACL_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), @@ -323,22 +324,22 @@ func TestAccS3BucketACL_migrate_aclNoChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_acl(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketConfig_acl(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), - resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(bucketResourceName, "acl", string(types.BucketCannedACLPrivate)), ), }, { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPrivate)), ), }, }, @@ -353,22 +354,22 @@ func TestAccS3BucketACL_migrate_aclWithChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_acl(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketConfig_acl(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), - resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(bucketResourceName, "acl", string(types.BucketCannedACLPrivate)), ), }, { - Config: testAccBucketACLConfig_basic_withDisabledPublicAccessBlock(bucketName, s3.BucketCannedACLPublicRead), + Config: testAccBucketACLConfig_basic_withDisabledPublicAccessBlock(bucketName, 
string(types.BucketCannedACLPublicRead)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPublicRead)), ), }, }, @@ -383,15 +384,15 @@ func TestAccS3BucketACL_migrate_grantsWithChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_acl(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketConfig_acl(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), - resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(bucketResourceName, "acl", string(types.BucketCannedACLPrivate)), ), }, { @@ -402,14 +403,14 @@ func TestAccS3BucketACL_migrate_grantsWithChange(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.grant.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeCanonicalUser, - "permission": s3.PermissionRead, + "grantee.0.type": string(types.TypeCanonicalUser), + "permission": string(types.PermissionRead), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "access_control_policy.0.grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeGroup, - "permission": s3.PermissionReadAcp, + "grantee.0.type": string(types.TypeGroup), + "permission": string(types.PermissionReadAcp), }), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]*regexp.Regexp{ "grantee.0.uri": regexache.MustCompile(`http://acs.*/groups/s3/LogDelivery`), @@ -429,22 +430,22 @@ func TestAccS3BucketACL_updateACL(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPrivate)), ), }, { - Config: testAccBucketACLConfig_basic_withDisabledPublicAccessBlock(bucketName, s3.BucketCannedACLPublicRead), + Config: testAccBucketACLConfig_basic_withDisabledPublicAccessBlock(bucketName, string(types.BucketCannedACLPublicRead)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, 
resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPublicRead)), ), }, { @@ -463,9 +464,9 @@ func TestAccS3BucketACL_updateGrant(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccBucketACLConfig_grants(bucketName), @@ -475,13 +476,13 @@ func TestAccS3BucketACL_updateGrant(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.grant.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeCanonicalUser, - "permission": s3.PermissionFullControl, + "grantee.0.type": string(types.TypeCanonicalUser), + "permission": string(types.PermissionFullControl), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeCanonicalUser, - "permission": s3.PermissionWrite, + "grantee.0.type": string(types.TypeCanonicalUser), + "permission": string(types.PermissionWrite), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "access_control_policy.0.grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.owner.#", "1"), @@ -501,14 +502,14 @@ func TestAccS3BucketACL_updateGrant(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.grant.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeCanonicalUser, - "permission": s3.PermissionRead, + "grantee.0.type": string(types.TypeCanonicalUser), + "permission": string(types.PermissionRead), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "access_control_policy.0.grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeGroup, - "permission": s3.PermissionReadAcp, + "grantee.0.type": string(types.TypeGroup), + "permission": string(types.PermissionReadAcp), }), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]*regexp.Regexp{ "grantee.0.uri": regexache.MustCompile(`http://acs.*/groups/s3/LogDelivery`), @@ -533,15 +534,15 @@ func TestAccS3BucketACL_ACLToGrant(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - 
resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPrivate)), resource.TestCheckResourceAttr(resourceName, "access_control_policy.#", "1"), ), }, @@ -569,9 +570,9 @@ func TestAccS3BucketACL_grantToACL(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccBucketACLConfig_grants(bucketName), @@ -582,10 +583,10 @@ func TestAccS3BucketACL_grantToACL(t *testing.T) { ), }, { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPrivate)), resource.TestCheckResourceAttr(resourceName, "access_control_policy.#", "1"), ), }, @@ -605,36 +606,16 @@ func testAccCheckBucketACLExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - bucket, expectedBucketOwner, _, err := tfs3.BucketACLParseResourceID(rs.Primary.ID) if err != nil { return err } - input := &s3.GetBucketAclInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketAclWithContext(ctx, input) - - if err != nil { - return err - } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - if output == nil || len(output.Grants) == 0 || output.Owner == nil { - return fmt.Errorf("S3 bucket ACL %s not found", rs.Primary.ID) - } + _, err = tfs3.FindBucketACL(ctx, conn, bucket, expectedBucketOwner) - return nil + return err } } diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index da918fab8e8..4c15b36d9b5 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -8,16 +8,16 @@ import ( "fmt" "log" "strings" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ 
-31,16 +31,12 @@ func ResourceBucketAnalyticsConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketAnalyticsConfigurationRead, UpdateWithoutTimeout: resourceBucketAnalyticsConfigurationPut, DeleteWithoutTimeout: resourceBucketAnalyticsConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, "bucket": { Type: schema.TypeString, Required: true, @@ -55,17 +51,22 @@ func ResourceBucketAnalyticsConfiguration() *schema.Resource { "prefix": { Type: schema.TypeString, Optional: true, - AtLeastOneOf: filterAtLeastOneOfKeys, + AtLeastOneOf: []string{"filter.0.prefix", "filter.0.tags"}, }, "tags": { Type: schema.TypeMap, Optional: true, - AtLeastOneOf: filterAtLeastOneOfKeys, Elem: &schema.Schema{Type: schema.TypeString}, + AtLeastOneOf: []string{"filter.0.prefix", "filter.0.tags"}, }, }, }, }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, "storage_class_analysis": { Type: schema.TypeList, Optional: true, @@ -79,10 +80,10 @@ func ResourceBucketAnalyticsConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "output_schema_version": { - Type: schema.TypeString, - Optional: true, - Default: s3.StorageClassAnalysisSchemaVersionV1, - ValidateFunc: validation.StringInSlice([]string{s3.StorageClassAnalysisSchemaVersionV1}, false), + Type: schema.TypeString, + Optional: true, + Default: types.StorageClassAnalysisSchemaVersionV1, + ValidateDiagFunc: enum.Validate[types.StorageClassAnalysisSchemaVersion](), }, "destination": { Type: schema.TypeList, @@ -107,10 +108,10 @@ func ResourceBucketAnalyticsConfiguration() *schema.Resource { ValidateFunc: verify.ValidAccountID, }, "format": { - Type: schema.TypeString, - Optional: true, - Default: s3.AnalyticsS3ExportFileFormatCsv, - ValidateFunc: validation.StringInSlice([]string{s3.AnalyticsS3ExportFileFormatCsv}, false), + Type: schema.TypeString, + Optional: true, + Default: types.AnalyticsS3ExportFileFormatCsv, + ValidateDiagFunc: enum.Validate[types.AnalyticsS3ExportFileFormat](), }, "prefix": { Type: schema.TypeString, @@ -132,101 +133,78 @@ func ResourceBucketAnalyticsConfiguration() *schema.Resource { } } -var filterAtLeastOneOfKeys = []string{"filter.0.prefix", "filter.0.tags"} - func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - bucket := d.Get("bucket").(string) name := d.Get("name").(string) - - log.Printf("[DEBUG] S3 bucket %q, add analytics configuration %q", bucket, name) - - analyticsConfiguration := &s3.AnalyticsConfiguration{ + analyticsConfiguration := &types.AnalyticsConfiguration{ Id: aws.String(name), - Filter: ExpandAnalyticsFilter(ctx, d.Get("filter").([]interface{})), - StorageClassAnalysis: ExpandStorageClassAnalysis(d.Get("storage_class_analysis").([]interface{})), + StorageClassAnalysis: expandStorageClassAnalysis(d.Get("storage_class_analysis").([]interface{})), } + if v, ok := d.GetOk("filter"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + analyticsConfiguration.Filter = expandAnalyticsFilter(ctx, v.([]interface{})[0].(map[string]interface{})) + } + + bucket := d.Get("bucket").(string) input := &s3.PutBucketAnalyticsConfigurationInput{ Bucket: aws.String(bucket), 
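Aside: the `ValidateDiagFunc` swaps above replace hand-maintained `validation.StringInSlice` lists with the provider's generic `enum.Validate[T]()`. The sketch below is a rough re-implementation of that idea, not the exact helper in `internal/enum`; it relies only on the `Values()` method that aws-sdk-go-v2 generates on every string enum:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// valueser matches the Values() method generated on every SDK v2 string enum.
type valueser[T ~string] interface {
	~string
	Values() []T
}

// validateEnum derives the allowed set from the enum type itself, so new API
// values are picked up on an SDK upgrade without touching the schema.
func validateEnum[T valueser[T]]() schema.SchemaValidateDiagFunc {
	var zero T
	var valid []string
	for _, v := range zero.Values() {
		valid = append(valid, string(v))
	}
	return validation.ToDiagFunc(validation.StringInSlice(valid, false))
}

func main() {
	f := validateEnum[types.StorageClassAnalysisSchemaVersion]()
	fmt.Println(len(f("V_1", cty.Path{}))) // 0 diagnostics: valid API value
	fmt.Println(len(f("V_9", cty.Path{}))) // 1 diagnostic: not an allowed value
}
```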
Id: aws.String(name), AnalyticsConfiguration: analyticsConfiguration, } - err := retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { - _, err := conn.PutBucketAnalyticsConfigurationWithContext(ctx, input) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketAnalyticsConfiguration(ctx, input) + }, errCodeNoSuchBucket) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - return retry.RetryableError(err) - } + if err != nil { + return diag.Errorf("creating S3 Bucket (%s) Analytics Configuration (%s): %s", bucket, name, err) + } - if err != nil { - return retry.NonRetryableError(err) - } - return nil - }) + if d.IsNewResource() { + d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - if tfresource.TimedOut(err) { - _, err = conn.PutBucketAnalyticsConfigurationWithContext(ctx, input) - } + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findAnalyticsConfiguration(ctx, conn, bucket, name) + }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "adding S3 Bucket Analytics Configuration: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Analytics Configuration (%s) create: %s", d.Id(), err) + } } - d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - return append(diags, resourceBucketAnalyticsConfigurationRead(ctx, d, meta)...) } func resourceBucketAnalyticsConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, name, err := BucketAnalyticsConfigurationParseID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) - } - - d.Set("bucket", bucket) - d.Set("name", name) - - input := &s3.GetBucketAnalyticsConfigurationInput{ - Bucket: aws.String(bucket), - Id: aws.String(name), + return sdkdiag.AppendFromErr(diags, err) } - log.Printf("[DEBUG] Reading S3 bucket analytics configuration: %s", input) - output, err := conn.GetBucketAnalyticsConfigurationWithContext(ctx, input) - - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - log.Printf("[WARN] S3 Bucket Analytics Configuration (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } + ac, err := findAnalyticsConfiguration(ctx, conn, bucket, name) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchConfiguration) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Analytics Configuration (%s) not found, removing from state", d.Id()) d.SetId("") - return diags + return nil } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) - } - - if output == nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket Analytics Configuration (%s): empty response", d.Id()) + return diag.Errorf("reading S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) } - if err := d.Set("filter", FlattenAnalyticsFilter(ctx, output.AnalyticsConfiguration.Filter)); err != nil { + d.Set("bucket", bucket) + if err := d.Set("filter", flattenAnalyticsFilter(ctx, ac.Filter)); err != nil { return sdkdiag.AppendErrorf(diags, "setting filter: %s", err) } - - if err = d.Set("storage_class_analysis", FlattenStorageClassAnalysis(output.AnalyticsConfiguration.StorageClassAnalysis)); err 
!= nil { - return sdkdiag.AppendErrorf(diags, "setting storage class anyalytics: %s", err) + d.Set("name", name) + if err = d.Set("storage_class_analysis", flattenStorageClassAnalysis(ac.StorageClassAnalysis)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting storage_class_analysis: %s", err) } return diags @@ -234,30 +212,35 @@ func resourceBucketAnalyticsConfigurationRead(ctx context.Context, d *schema.Res func resourceBucketAnalyticsConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, name, err := BucketAnalyticsConfigurationParseID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting S3 analytics configuration (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &s3.DeleteBucketAnalyticsConfigurationInput{ + log.Printf("[DEBUG] Deleting S3 Bucket Analytics Configuration: %s", d.Id()) + _, err = conn.DeleteBucketAnalyticsConfiguration(ctx, &s3.DeleteBucketAnalyticsConfigurationInput{ Bucket: aws.String(bucket), Id: aws.String(name), + }) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchConfiguration) { + return diags } - log.Printf("[DEBUG] Deleting S3 bucket analytics configuration: %s", input) - _, err = conn.DeleteBucketAnalyticsConfigurationWithContext(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "NoSuchConfiguration", "The specified configuration does not exist.") { - return diags - } - return sdkdiag.AppendErrorf(diags, "deleting S3 analytics configuration (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) } - if err := WaitForDeleteBucketAnalyticsConfiguration(ctx, conn, bucket, name, 1*time.Minute); err != nil { - return sdkdiag.AppendErrorf(diags, "deleting S3 analytics configuration (%s): %s", d.Id(), err) + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findAnalyticsConfiguration(ctx, conn, bucket, name) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Analytics Configuration (%s) delete: %s", d.Id(), err) } + return nil } @@ -271,46 +254,50 @@ func BucketAnalyticsConfigurationParseID(id string) (string, string, error) { return bucket, name, nil } -func ExpandAnalyticsFilter(ctx context.Context, l []interface{}) *s3.AnalyticsFilter { - if len(l) == 0 || l[0] == nil { - return nil - } - - m := l[0].(map[string]interface{}) - +func expandAnalyticsFilter(ctx context.Context, m map[string]interface{}) types.AnalyticsFilter { var prefix string if v, ok := m["prefix"]; ok { prefix = v.(string) } - var tags []*s3.Tag + var tags []types.Tag if v, ok := m["tags"]; ok { - tags = Tags(tftags.New(ctx, v).IgnoreAWS()) + tags = tagsV2(tftags.New(ctx, v).IgnoreAWS()) } if prefix == "" && len(tags) == 0 { return nil } - analyticsFilter := &s3.AnalyticsFilter{} + + var analyticsFilter types.AnalyticsFilter + if prefix != "" && len(tags) > 0 { - analyticsFilter.And = &s3.AnalyticsAndOperator{ - Prefix: aws.String(prefix), - Tags: tags, + analyticsFilter = &types.AnalyticsFilterMemberAnd{ + Value: types.AnalyticsAndOperator{ + Prefix: aws.String(prefix), + Tags: tags, + }, } } else if len(tags) > 1 { - analyticsFilter.And = &s3.AnalyticsAndOperator{ - Tags: tags, + analyticsFilter = 
&types.AnalyticsFilterMemberAnd{ + Value: types.AnalyticsAndOperator{ + Tags: tags, + }, } } else if len(tags) == 1 { - analyticsFilter.Tag = tags[0] + analyticsFilter = &types.AnalyticsFilterMemberTag{ + Value: tags[0], + } } else { - analyticsFilter.Prefix = aws.String(prefix) + analyticsFilter = &types.AnalyticsFilterMemberPrefix{ + Value: prefix, + } } return analyticsFilter } -func ExpandStorageClassAnalysis(l []interface{}) *s3.StorageClassAnalysis { - result := &s3.StorageClassAnalysis{} +func expandStorageClassAnalysis(l []interface{}) *types.StorageClassAnalysis { + result := &types.StorageClassAnalysis{} if len(l) == 0 || l[0] == nil { return result @@ -318,14 +305,14 @@ func ExpandStorageClassAnalysis(l []interface{}) *s3.StorageClassAnalysis { m := l[0].(map[string]interface{}) if v, ok := m["data_export"]; ok { - dataExport := &s3.StorageClassAnalysisDataExport{} + dataExport := &types.StorageClassAnalysisDataExport{} result.DataExport = dataExport foo := v.([]interface{}) if len(foo) != 0 && foo[0] != nil { bar := foo[0].(map[string]interface{}) if v, ok := bar["output_schema_version"]; ok { - dataExport.OutputSchemaVersion = aws.String(v.(string)) + dataExport.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(v.(string)) } dataExport.Destination = expandAnalyticsExportDestination(bar["destination"].([]interface{})) @@ -335,8 +322,8 @@ func ExpandStorageClassAnalysis(l []interface{}) *s3.StorageClassAnalysis { return result } -func expandAnalyticsExportDestination(edl []interface{}) *s3.AnalyticsExportDestination { - result := &s3.AnalyticsExportDestination{} +func expandAnalyticsExportDestination(edl []interface{}) *types.AnalyticsExportDestination { + result := &types.AnalyticsExportDestination{} if len(edl) != 0 && edl[0] != nil { edm := edl[0].(map[string]interface{}) @@ -345,13 +332,13 @@ func expandAnalyticsExportDestination(edl []interface{}) *s3.AnalyticsExportDest return result } -func expandAnalyticsBucketDestination(bdl []interface{}) *s3.AnalyticsS3BucketDestination { - result := &s3.AnalyticsS3BucketDestination{} +func expandAnalyticsBucketDestination(bdl []interface{}) *types.AnalyticsS3BucketDestination { + result := &types.AnalyticsS3BucketDestination{} if len(bdl) != 0 && bdl[0] != nil { bdm := bdl[0].(map[string]interface{}) result.Bucket = aws.String(bdm["bucket_arn"].(string)) - result.Format = aws.String(bdm["format"].(string)) + result.Format = types.AnalyticsS3ExportFileFormat(bdm["format"].(string)) if v, ok := bdm["bucket_account_id"]; ok && v != "" { result.BucketAccountId = aws.String(v.(string)) @@ -365,41 +352,39 @@ func expandAnalyticsBucketDestination(bdl []interface{}) *s3.AnalyticsS3BucketDe return result } -func FlattenAnalyticsFilter(ctx context.Context, analyticsFilter *s3.AnalyticsFilter) []map[string]interface{} { - if analyticsFilter == nil { - return nil - } - +func flattenAnalyticsFilter(ctx context.Context, analyticsFilter types.AnalyticsFilter) []map[string]interface{} { result := make(map[string]interface{}) - if and := analyticsFilter.And; and != nil { - if and.Prefix != nil { - result["prefix"] = aws.StringValue(and.Prefix) + + switch v := analyticsFilter.(type) { + case *types.AnalyticsFilterMemberAnd: + if v := v.Value.Prefix; v != nil { + result["prefix"] = aws.ToString(v) } - if and.Tags != nil { - result["tags"] = KeyValueTags(ctx, and.Tags).IgnoreAWS().Map() + if v := v.Value.Tags; v != nil { + result["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() } - } else if analyticsFilter.Prefix != nil { - 
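Aside: the biggest structural change in this file is that `AnalyticsFilter` stops being a v1 struct with mutually exclusive pointer fields (`Prefix`, `Tag`, `And`) and becomes a v2 tagged union: a sealed interface with one `*...Member` wrapper per variant. Construction picks a wrapper; consumption is a type switch, as in the flatten rewrite nearby. A compact round trip mirroring the expand logic above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// expand follows the same variant selection as expandAnalyticsFilter above:
// And for prefix+tags or multiple tags, Tag for a single tag, Prefix otherwise.
func expand(prefix string, tags []types.Tag) types.AnalyticsFilter {
	switch {
	case prefix != "" && len(tags) > 0, len(tags) > 1:
		and := types.AnalyticsAndOperator{Tags: tags}
		if prefix != "" {
			and.Prefix = aws.String(prefix)
		}
		return &types.AnalyticsFilterMemberAnd{Value: and}
	case len(tags) == 1:
		return &types.AnalyticsFilterMemberTag{Value: tags[0]}
	case prefix != "":
		return &types.AnalyticsFilterMemberPrefix{Value: prefix}
	default:
		return nil // empty filter
	}
}

// describe consumes the union with a type switch over the known variants.
func describe(f types.AnalyticsFilter) string {
	switch v := f.(type) {
	case *types.AnalyticsFilterMemberAnd:
		return fmt.Sprintf("and: prefix=%s, %d tag(s)", aws.ToString(v.Value.Prefix), len(v.Value.Tags))
	case *types.AnalyticsFilterMemberPrefix:
		return "prefix: " + v.Value
	case *types.AnalyticsFilterMemberTag:
		return "tag: " + aws.ToString(v.Value.Key)
	default:
		return "unset"
	}
}

func main() {
	f := expand("logs/", []types.Tag{{Key: aws.String("env"), Value: aws.String("prod")}})
	fmt.Println(describe(f)) // and: prefix=logs/, 1 tag(s)
}
```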
result["prefix"] = aws.StringValue(analyticsFilter.Prefix) - } else if analyticsFilter.Tag != nil { - tags := []*s3.Tag{ - analyticsFilter.Tag, + case *types.AnalyticsFilterMemberPrefix: + result["prefix"] = v.Value + case *types.AnalyticsFilterMemberTag: + tags := []types.Tag{ + v.Value, } - result["tags"] = KeyValueTags(ctx, tags).IgnoreAWS().Map() - } else { + result["tags"] = keyValueTagsV2(ctx, tags).IgnoreAWS().Map() + default: return nil } + return []map[string]interface{}{result} } -func FlattenStorageClassAnalysis(storageClassAnalysis *s3.StorageClassAnalysis) []map[string]interface{} { +func flattenStorageClassAnalysis(storageClassAnalysis *types.StorageClassAnalysis) []map[string]interface{} { if storageClassAnalysis == nil || storageClassAnalysis.DataExport == nil { return []map[string]interface{}{} } dataExport := storageClassAnalysis.DataExport - de := make(map[string]interface{}) - if dataExport.OutputSchemaVersion != nil { - de["output_schema_version"] = aws.StringValue(dataExport.OutputSchemaVersion) + de := map[string]interface{}{ + "output_schema_version": dataExport.OutputSchemaVersion, } if dataExport.Destination != nil { de["destination"] = flattenAnalyticsExportDestination(dataExport.Destination) @@ -411,7 +396,7 @@ func FlattenStorageClassAnalysis(storageClassAnalysis *s3.StorageClassAnalysis) return []map[string]interface{}{result} } -func flattenAnalyticsExportDestination(destination *s3.AnalyticsExportDestination) []interface{} { +func flattenAnalyticsExportDestination(destination *types.AnalyticsExportDestination) []interface{} { if destination == nil || destination.S3BucketDestination == nil { return []interface{}{} } @@ -423,56 +408,47 @@ func flattenAnalyticsExportDestination(destination *s3.AnalyticsExportDestinatio } } -func flattenAnalyticsBucketDestination(bucketDestination *s3.AnalyticsS3BucketDestination) []interface{} { +func flattenAnalyticsBucketDestination(bucketDestination *types.AnalyticsS3BucketDestination) []interface{} { if bucketDestination == nil { return nil } result := map[string]interface{}{ - "bucket_arn": aws.StringValue(bucketDestination.Bucket), - "format": aws.StringValue(bucketDestination.Format), + "bucket_arn": aws.ToString(bucketDestination.Bucket), + "format": bucketDestination.Format, } if bucketDestination.BucketAccountId != nil { - result["bucket_account_id"] = aws.StringValue(bucketDestination.BucketAccountId) + result["bucket_account_id"] = aws.ToString(bucketDestination.BucketAccountId) } if bucketDestination.Prefix != nil { - result["prefix"] = aws.StringValue(bucketDestination.Prefix) + result["prefix"] = aws.ToString(bucketDestination.Prefix) } return []interface{}{result} } -func WaitForDeleteBucketAnalyticsConfiguration(ctx context.Context, conn *s3.S3, bucket, name string, timeout time.Duration) error { +func findAnalyticsConfiguration(ctx context.Context, conn *s3.Client, bucket, id string) (*types.AnalyticsConfiguration, error) { input := &s3.GetBucketAnalyticsConfigurationInput{ Bucket: aws.String(bucket), - Id: aws.String(name), + Id: aws.String(id), } - err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { - output, err := conn.GetBucketAnalyticsConfigurationWithContext(ctx, input) + output, err := conn.GetBucketAnalyticsConfiguration(ctx, input) - if err != nil { - return retry.NonRetryableError(err) - } - - if output != nil && output.AnalyticsConfiguration != nil { - return retry.RetryableError(fmt.Errorf("S3 bucket analytics configuration exists: %v", output)) + if tfawserr.ErrCodeEquals(err, 
errCodeNoSuchBucket, errCodeNoSuchConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - - return nil - }) - - if tfresource.TimedOut(err) { // nosemgrep:ci.helper-schema-TimeoutError-check-doesnt-return-output - _, err = conn.GetBucketAnalyticsConfigurationWithContext(ctx, input) } - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "NoSuchConfiguration", "The specified configuration does not exist.") { - return nil + if err != nil { + return nil, err } - if err != nil { - return fmt.Errorf("deleting S3 Bucket Analytics Configuration \"%s:%s\": %w", bucket, name, err) + if output == nil || output.AnalyticsConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) } - return nil + return output.AnalyticsConfiguration, nil } diff --git a/internal/service/s3/bucket_analytics_configuration_test.go b/internal/service/s3/bucket_analytics_configuration_test.go index 5929a2d5953..bfa16612ed9 100644 --- a/internal/service/s3/bucket_analytics_configuration_test.go +++ b/internal/service/s3/bucket_analytics_configuration_test.go @@ -6,31 +6,29 @@ package s3_test import ( "context" "fmt" - "reflect" - "sort" "testing" - "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketAnalyticsConfiguration_basic(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_analytics_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -53,15 +51,15 @@ func TestAccS3BucketAnalyticsConfiguration_basic(t *testing.T) { }) } -func TestAccS3BucketAnalyticsConfiguration_removed(t *testing.T) { +func TestAccS3BucketAnalyticsConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_analytics_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -69,13 +67,9 @@ func TestAccS3BucketAnalyticsConfiguration_removed(t *testing.T) { Config: testAccBucketAnalyticsConfigurationConfig_basic(rName, rName), Check: resource.ComposeTestCheckFunc( 
testAccCheckBucketAnalyticsConfigurationExists(ctx, resourceName, &ac), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceBucketAnalyticsConfiguration(), resourceName), ), - }, - { - Config: testAccBucketAnalyticsConfigurationConfig_removed(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketAnalyticsConfigurationRemoved(ctx, rName, rName), - ), + ExpectNonEmptyPlan: true, }, }, }) @@ -83,7 +77,7 @@ func TestAccS3BucketAnalyticsConfiguration_removed(t *testing.T) { func TestAccS3BucketAnalyticsConfiguration_updateBasic(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration originalACName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) originalBucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) updatedACName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -92,7 +86,7 @@ func TestAccS3BucketAnalyticsConfiguration_updateBasic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -110,7 +104,6 @@ func TestAccS3BucketAnalyticsConfiguration_updateBasic(t *testing.T) { Config: testAccBucketAnalyticsConfigurationConfig_basic(updatedACName, originalBucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketAnalyticsConfigurationExists(ctx, resourceName, &ac), - testAccCheckBucketAnalyticsConfigurationRemoved(ctx, originalACName, originalBucketName), resource.TestCheckResourceAttr(resourceName, "name", updatedACName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "bucket"), resource.TestCheckResourceAttr(resourceName, "filter.#", "0"), @@ -121,7 +114,6 @@ func TestAccS3BucketAnalyticsConfiguration_updateBasic(t *testing.T) { Config: testAccBucketAnalyticsConfigurationConfig_update(updatedACName, originalBucketName, updatedBucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketAnalyticsConfigurationExists(ctx, resourceName, &ac), - testAccCheckBucketAnalyticsConfigurationRemoved(ctx, updatedACName, originalBucketName), resource.TestCheckResourceAttr(resourceName, "name", updatedACName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test_2", "bucket"), resource.TestCheckResourceAttr(resourceName, "filter.#", "0"), @@ -143,7 +135,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_empty(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -157,7 +149,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_empty(t *testing.T) { func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefix(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_analytics_configuration.test" @@ -167,7 +159,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -200,7 +192,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefix(t *testing.T) { func TestAccS3BucketAnalyticsConfiguration_WithFilter_singleTag(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_analytics_configuration.test" @@ -210,7 +202,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_singleTag(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -245,7 +237,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_singleTag(t *testing.T) { func TestAccS3BucketAnalyticsConfiguration_WithFilter_multipleTags(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_analytics_configuration.test" @@ -257,7 +249,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_multipleTags(t *testing.T) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -294,7 +286,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_multipleTags(t *testing.T) func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefixAndTags(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_analytics_configuration.test" @@ -308,7 +300,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefixAndTags(t *testing.T resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -345,7 +337,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_prefixAndTags(t *testing.T func TestAccS3BucketAnalyticsConfiguration_WithFilter_remove(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_analytics_configuration.test" @@ -354,7 +346,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithFilter_remove(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -386,7 +378,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_empty(t *tes resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -400,14 +392,14 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_empty(t *tes func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_default(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration resourceName := "aws_s3_bucket_analytics_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -435,7 +427,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_default(t *t func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_full(t *testing.T) { ctx := acctest.Context(t) - var ac s3.AnalyticsConfiguration + var ac types.AnalyticsConfiguration resourceName := "aws_s3_bucket_analytics_configuration.test" rInt := sdkacctest.RandInt() @@ -444,7 +436,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_full(t *test resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -473,7 +465,7 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_full(t *test func testAccCheckBucketAnalyticsConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_analytics_configuration" { @@ -485,80 +477,75 @@ func testAccCheckBucketAnalyticsConfigurationDestroy(ctx context.Context) resour return err } - return tfs3.WaitForDeleteBucketAnalyticsConfiguration(ctx, conn, bucket, name, 1*time.Minute) + _, err = tfs3.FindAnalyticsConfiguration(ctx, conn, bucket, name) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("S3 Bucket Analytics Configuration %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckBucketAnalyticsConfigurationExists(ctx context.Context, n string, ac *s3.AnalyticsConfiguration) resource.TestCheckFunc { +func testAccCheckBucketAnalyticsConfigurationExists(ctx context.Context, n string, v *types.AnalyticsConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - output, err := conn.GetBucketAnalyticsConfigurationWithContext(ctx, &s3.GetBucketAnalyticsConfigurationInput{ - Bucket: aws.String(rs.Primary.Attributes["bucket"]), - Id: aws.String(rs.Primary.Attributes["name"]), - }) - + bucket, name, err := tfs3.BucketAnalyticsConfigurationParseID(rs.Primary.ID) if err != nil { return err } - if output == nil || output.AnalyticsConfiguration == nil { - return fmt.Errorf("error reading S3 Bucket Analytics Configuration %q: empty response", rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + output, err := tfs3.FindAnalyticsConfiguration(ctx, conn, bucket, name) + + if err != nil { + return err } - *ac = *output.AnalyticsConfiguration + *v = *output return nil } } -func testAccCheckBucketAnalyticsConfigurationRemoved(ctx context.Context, name, bucket string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - return tfs3.WaitForDeleteBucketAnalyticsConfiguration(ctx, conn, bucket, name, 1*time.Minute) - } -} - func testAccBucketAnalyticsConfigurationConfig_basic(name, bucket string) string { return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[2]q } `, name, bucket) } -func testAccBucketAnalyticsConfigurationConfig_removed(bucket string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = "%s" -} -`, bucket) -} - func testAccBucketAnalyticsConfigurationConfig_update(name, originalBucket, updatedBucket string) string { return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test_2.bucket - name = "%s" + name = %[1]q } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[2]q } resource "aws_s3_bucket" "test_2" { - bucket = "%s" + bucket = %[3]q } `, name, originalBucket, updatedBucket) } @@ -567,14 +554,14 @@ func testAccBucketAnalyticsConfigurationConfig_emptyFilter(name, bucket string) return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q filter { } } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[2]q } `, name, bucket) } @@ -583,15 +570,15 @@ func testAccBucketAnalyticsConfigurationConfig_filterPrefix(name, bucket, prefix return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q filter { - prefix = "%s" + prefix = %[2]q } } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[3]q } `, name, prefix, bucket) } @@ -600,17 +587,17 @@ func testAccBucketAnalyticsConfigurationConfig_filterSingleTag(name, bucket, tag return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q filter { tags = { - "tag1" = "%s" + "tag1" = %[2]q } } } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[3]q } `, name, tag, bucket) } @@ -619,18 +606,18 @@ func testAccBucketAnalyticsConfigurationConfig_filterMultipleTags(name, bucket, return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q filter { tags = { - "tag1" = "%s" - "tag2" = "%s" + "tag1" = %[2]q + "tag2" = %[3]q } } } resource "aws_s3_bucket" "test" 
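Aside: the test config generators here also switch from bare `"%s"` verbs with hand-written quotes to indexed `%[n]q` verbs. The `q` verb quotes and escapes the argument for us, and the explicit index keeps multi-argument templates readable:

```go
package main

import "fmt"

func main() {
	name, bucket := "my-config", "my-bucket"

	// Before: quoting by hand, arguments matched purely by position.
	fmt.Printf("name = \"%s\"\nbucket = \"%s\"\n", name, bucket)

	// After: %[1]q / %[2]q quote for us and survive reordering.
	fmt.Printf("bucket = %[2]q\nname = %[1]q\n", name, bucket)
}
```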
{ - bucket = "%s" + bucket = %[4]q } `, name, tag1, tag2, bucket) } @@ -639,20 +626,20 @@ func testAccBucketAnalyticsConfigurationConfig_filterPrefixAndTags(name, bucket, return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q filter { - prefix = "%s" + prefix = %[2]q tags = { - "tag1" = "%s" - "tag2" = "%s" + "tag1" = %[3]q + "tag2" = %[4]q } } } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[5]q } `, name, prefix, tag1, tag2, bucket) } @@ -661,14 +648,14 @@ func testAccBucketAnalyticsConfigurationConfig_emptyStorageClassAnalysis(name, b return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q storage_class_analysis { } } resource "aws_s3_bucket" "test" { - bucket = "%s" + bucket = %[2]q } `, name, bucket) } @@ -677,7 +664,7 @@ func testAccBucketAnalyticsConfigurationConfig_defaultStorageClassAnalysis(name, return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q storage_class_analysis { data_export { @@ -691,7 +678,7 @@ resource "aws_s3_bucket_analytics_configuration" "test" { } resource "aws_s3_bucket" "test" { - bucket = "%[2]s" + bucket = %[2]q } resource "aws_s3_bucket" "destination" { @@ -704,7 +691,7 @@ func testAccBucketAnalyticsConfigurationConfig_fullStorageClassAnalysis(name, bu return fmt.Sprintf(` resource "aws_s3_bucket_analytics_configuration" "test" { bucket = aws_s3_bucket.test.bucket - name = "%s" + name = %[1]q storage_class_analysis { data_export { @@ -714,7 +701,7 @@ resource "aws_s3_bucket_analytics_configuration" "test" { s3_bucket_destination { format = "CSV" bucket_arn = aws_s3_bucket.destination.arn - prefix = "%s" + prefix = %[2]q } } } @@ -722,7 +709,7 @@ resource "aws_s3_bucket_analytics_configuration" "test" { } resource "aws_s3_bucket" "test" { - bucket = "%[3]s" + bucket = %[3]q } resource "aws_s3_bucket" "destination" { @@ -730,539 +717,3 @@ resource "aws_s3_bucket" "destination" { } `, name, prefix, bucket) } - -func TestExpandAnalyticsFilter(t *testing.T) { - t.Parallel() - - ctx := context.Background() - testCases := map[string]struct { - Input []interface{} - Expected *s3.AnalyticsFilter - }{ - "nil input": { - Input: nil, - Expected: nil, - }, - "empty input": { - Input: []interface{}{}, - Expected: nil, - }, - "prefix only": { - Input: []interface{}{ - map[string]interface{}{ - "prefix": "prefix/", - }, - }, - Expected: &s3.AnalyticsFilter{ - Prefix: aws.String("prefix/"), - }, - }, - "prefix and single tag": { - Input: []interface{}{ - map[string]interface{}{ - "prefix": "prefix/", - "tags": map[string]interface{}{ - "tag1key": "tag1value", - }, - }, - }, - Expected: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Prefix: aws.String("prefix/"), - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - }, - }, - }, - }, - "prefix and multiple tags": { - Input: []interface{}{map[string]interface{}{ - "prefix": "prefix/", - "tags": map[string]interface{}{ - "tag1key": "tag1value", - "tag2key": "tag2value", - }, - }, - }, - Expected: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Prefix: aws.String("prefix/"), - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - { - Key: aws.String("tag2key"), - Value: aws.String("tag2value"), - }, - }, - }, - }, - }, - "single tag only": { - Input: 
[]interface{}{ - map[string]interface{}{ - "tags": map[string]interface{}{ - "tag1key": "tag1value", - }, - }, - }, - Expected: &s3.AnalyticsFilter{ - Tag: &s3.Tag{ - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - }, - }, - "multiple tags only": { - Input: []interface{}{ - map[string]interface{}{ - "tags": map[string]interface{}{ - "tag1key": "tag1value", - "tag2key": "tag2value", - }, - }, - }, - Expected: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - { - Key: aws.String("tag2key"), - Value: aws.String("tag2value"), - }, - }, - }, - }, - }, - } - - for k, tc := range testCases { - value := tfs3.ExpandAnalyticsFilter(ctx, tc.Input) - - if value == nil { - if tc.Expected == nil { - continue - } - - t.Errorf("Case %q: Got nil\nExpected:\n%v", k, tc.Expected) - } - - if tc.Expected == nil { - t.Errorf("Case %q: Got: %v\nExpected: nil", k, value) - } - - // Sort tags by key for consistency - if value.And != nil && value.And.Tags != nil { - sort.Slice(value.And.Tags, func(i, j int) bool { - return *value.And.Tags[i].Key < *value.And.Tags[j].Key - }) - } - - // Convert to strings to avoid dealing with pointers - valueS := fmt.Sprintf("%v", value) - expectedValueS := fmt.Sprintf("%v", tc.Expected) - - if valueS != expectedValueS { - t.Errorf("Case %q: Given:\n%s\n\nExpected:\n%s", k, valueS, expectedValueS) - } - } -} - -func TestExpandStorageClassAnalysis(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - Input []interface{} - Expected *s3.StorageClassAnalysis - }{ - "nil input": { - Input: nil, - Expected: &s3.StorageClassAnalysis{}, - }, - "empty input": { - Input: []interface{}{}, - Expected: &s3.StorageClassAnalysis{}, - }, - "nil array": { - Input: []interface{}{ - nil, - }, - Expected: &s3.StorageClassAnalysis{}, - }, - "empty data_export": { - Input: []interface{}{ - map[string]interface{}{ - "data_export": []interface{}{}, - }, - }, - Expected: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{}, - }, - }, - "data_export complete": { - Input: []interface{}{ - map[string]interface{}{ - "data_export": []interface{}{ - map[string]interface{}{ - "output_schema_version": s3.StorageClassAnalysisSchemaVersionV1, - "destination": []interface{}{}, - }, - }, - }, - }, - Expected: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1), - Destination: &s3.AnalyticsExportDestination{}, - }, - }, - }, - "empty s3_bucket_destination": { - Input: []interface{}{ - map[string]interface{}{ - "data_export": []interface{}{ - map[string]interface{}{ - "destination": []interface{}{ - map[string]interface{}{ - "s3_bucket_destination": []interface{}{}, - }, - }, - }, - }, - }, - }, - Expected: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - Destination: &s3.AnalyticsExportDestination{ - S3BucketDestination: &s3.AnalyticsS3BucketDestination{}, - }, - }, - }, - }, - "s3_bucket_destination complete": { - Input: []interface{}{ - map[string]interface{}{ - "data_export": []interface{}{ - map[string]interface{}{ - "destination": []interface{}{ - map[string]interface{}{ - "s3_bucket_destination": []interface{}{ - map[string]interface{}{ - "bucket_arn": "arn:aws:s3", //lintignore:AWSAT005 - "bucket_account_id": "1234567890", - "format": s3.AnalyticsS3ExportFileFormatCsv, - "prefix": "prefix/", - }, - }, - }, - }, - }, - }, - }, - }, 
- Expected: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - Destination: &s3.AnalyticsExportDestination{ - S3BucketDestination: &s3.AnalyticsS3BucketDestination{ - Bucket: aws.String("arn:aws:s3"), //lintignore:AWSAT005 - BucketAccountId: aws.String("1234567890"), - Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv), - Prefix: aws.String("prefix/"), - }, - }, - }, - }, - }, - "s3_bucket_destination required": { - Input: []interface{}{ - map[string]interface{}{ - "data_export": []interface{}{ - map[string]interface{}{ - "destination": []interface{}{ - map[string]interface{}{ - "s3_bucket_destination": []interface{}{ - map[string]interface{}{ - "bucket_arn": "arn:aws:s3", //lintignore:AWSAT005 - "format": s3.AnalyticsS3ExportFileFormatCsv, - }, - }, - }, - }, - }, - }, - }, - }, - Expected: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - Destination: &s3.AnalyticsExportDestination{ - S3BucketDestination: &s3.AnalyticsS3BucketDestination{ - Bucket: aws.String("arn:aws:s3"), //lintignore:AWSAT005 - BucketAccountId: nil, - Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv), - Prefix: nil, - }, - }, - }, - }, - }, - } - - for k, tc := range testCases { - value := tfs3.ExpandStorageClassAnalysis(tc.Input) - - if !reflect.DeepEqual(value, tc.Expected) { - t.Errorf("Case %q:\nGot:\n%v\nExpected:\n%v", k, value, tc.Expected) - } - } -} - -func TestFlattenAnalyticsFilter(t *testing.T) { - t.Parallel() - - ctx := context.Background() - testCases := map[string]struct { - Input *s3.AnalyticsFilter - Expected []map[string]interface{} - }{ - "nil input": { - Input: nil, - Expected: nil, - }, - "empty input": { - Input: &s3.AnalyticsFilter{}, - Expected: nil, - }, - "prefix only": { - Input: &s3.AnalyticsFilter{ - Prefix: aws.String("prefix/"), - }, - Expected: []map[string]interface{}{ - { - "prefix": "prefix/", - }, - }, - }, - "prefix and single tag": { - Input: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Prefix: aws.String("prefix/"), - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - }, - }, - }, - Expected: []map[string]interface{}{ - { - "prefix": "prefix/", - "tags": map[string]string{ - "tag1key": "tag1value", - }, - }, - }, - }, - "prefix and multiple tags": { - Input: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Prefix: aws.String("prefix/"), - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - { - Key: aws.String("tag2key"), - Value: aws.String("tag2value"), - }, - }, - }, - }, - Expected: []map[string]interface{}{ - { - "prefix": "prefix/", - "tags": map[string]string{ - "tag1key": "tag1value", - "tag2key": "tag2value", - }, - }, - }, - }, - "single tag only": { - Input: &s3.AnalyticsFilter{ - Tag: &s3.Tag{ - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - }, - Expected: []map[string]interface{}{ - { - "tags": map[string]string{ - "tag1key": "tag1value", - }, - }, - }, - }, - "multiple tags only": { - Input: &s3.AnalyticsFilter{ - And: &s3.AnalyticsAndOperator{ - Tags: []*s3.Tag{ - { - Key: aws.String("tag1key"), - Value: aws.String("tag1value"), - }, - { - Key: aws.String("tag2key"), - Value: aws.String("tag2value"), - }, - }, - }, - }, - Expected: []map[string]interface{}{ - { - "tags": map[string]string{ - "tag1key": "tag1value", - "tag2key": "tag2value", - }, - }, - }, - }, - } - - for k, tc := range testCases { - value := tfs3.FlattenAnalyticsFilter(ctx, tc.Input) - - if !reflect.DeepEqual(value, 
tc.Expected) { - t.Errorf("Case %q: Got:\n%v\n\nExpected:\n%v", k, value, tc.Expected) - } - } -} - -func TestFlattenStorageClassAnalysis(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - Input *s3.StorageClassAnalysis - Expected []map[string]interface{} - }{ - "nil value": { - Input: nil, - Expected: []map[string]interface{}{}, - }, - "empty root": { - Input: &s3.StorageClassAnalysis{}, - Expected: []map[string]interface{}{}, - }, - "empty data_export": { - Input: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{}, - }, - Expected: []map[string]interface{}{ - { - "data_export": []interface{}{ - map[string]interface{}{}, - }, - }, - }, - }, - "data_export complete": { - Input: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1), - Destination: &s3.AnalyticsExportDestination{}, - }, - }, - Expected: []map[string]interface{}{ - { - "data_export": []interface{}{ - map[string]interface{}{ - "output_schema_version": s3.StorageClassAnalysisSchemaVersionV1, - "destination": []interface{}{}, - }, - }, - }, - }, - }, - "s3_bucket_destination required": { - Input: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - Destination: &s3.AnalyticsExportDestination{ - S3BucketDestination: &s3.AnalyticsS3BucketDestination{ - Bucket: aws.String("arn:aws:s3"), //lintignore:AWSAT005 - Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv), - }, - }, - }, - }, - Expected: []map[string]interface{}{ - { - "data_export": []interface{}{ - map[string]interface{}{ - "destination": []interface{}{ - map[string]interface{}{ - "s3_bucket_destination": []interface{}{ - map[string]interface{}{ - "bucket_arn": "arn:aws:s3", //lintignore:AWSAT005 - "format": s3.AnalyticsS3ExportFileFormatCsv, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "s3_bucket_destination complete": { - Input: &s3.StorageClassAnalysis{ - DataExport: &s3.StorageClassAnalysisDataExport{ - Destination: &s3.AnalyticsExportDestination{ - S3BucketDestination: &s3.AnalyticsS3BucketDestination{ - Bucket: aws.String("arn:aws:s3"), //lintignore:AWSAT005 - BucketAccountId: aws.String("1234567890"), - Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv), - Prefix: aws.String("prefix/"), - }, - }, - }, - }, - Expected: []map[string]interface{}{ - { - "data_export": []interface{}{ - map[string]interface{}{ - "destination": []interface{}{ - map[string]interface{}{ - "s3_bucket_destination": []interface{}{ - map[string]interface{}{ - "bucket_arn": "arn:aws:s3", //lintignore:AWSAT005 - "bucket_account_id": "1234567890", - "format": s3.AnalyticsS3ExportFileFormatCsv, - "prefix": "prefix/", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for k, tc := range testCases { - value := tfs3.FlattenStorageClassAnalysis(tc.Input) - - if !reflect.DeepEqual(value, tc.Expected) { - t.Errorf("Case %q:\nGot:\n%v\nExpected:\n%v", k, value, tc.Expected) - } - } -} diff --git a/internal/service/s3/bucket_cors_configuration.go b/internal/service/s3/bucket_cors_configuration.go index eb529e2f6bf..03ad1fc6fa6 100644 --- a/internal/service/s3/bucket_cors_configuration.go +++ b/internal/service/s3/bucket_cors_configuration.go @@ -6,12 +6,13 @@ package s3 import ( "context" "log" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + 
"github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -27,6 +28,7 @@ func ResourceBucketCorsConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketCorsConfigurationRead, UpdateWithoutTimeout: resourceBucketCorsConfigurationUpdate, DeleteWithoutTimeout: resourceBucketCorsConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -87,87 +89,72 @@ func ResourceBucketCorsConfiguration() *schema.Resource { } func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) expectedBucketOwner := d.Get("expected_bucket_owner").(string) - input := &s3.PutBucketCorsInput{ Bucket: aws.String(bucket), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: expandBucketCorsConfigurationCorsRules(d.Get("cors_rule").(*schema.Set).List()), + CORSConfiguration: &types.CORSConfiguration{ + CORSRules: expandCORSRules(d.Get("cors_rule").(*schema.Set).List()), }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.PutBucketCorsWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketCors(ctx, input) + }, errCodeNoSuchBucket) if err != nil { - return diag.Errorf("creating S3 bucket (%s) CORS configuration: %s", bucket, err) + return diag.Errorf("creating S3 Bucket (%s) CORS Configuration: %s", bucket, err) } d.SetId(CreateResourceID(bucket, expectedBucketOwner)) + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findCORSRules(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket CORS Configuration (%s) create: %s", d.Id(), err) + } + return resourceBucketCorsConfigurationRead(ctx, d, meta) } func resourceBucketCorsConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - input := &s3.GetBucketCorsInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - corsResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.GetBucketCorsWithContext(ctx, input) - }, ErrCodeNoSuchCORSConfiguration) + corsRules, err := findCORSRules(ctx, conn, bucket, expectedBucketOwner) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, ErrCodeNoSuchCORSConfiguration) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket CORS Configuration (%s) not found, removing from state", d.Id()) d.SetId("") 
return nil } if err != nil { - return diag.Errorf("reading S3 bucket CORS configuration (%s): %s", d.Id(), err) - } - - output, ok := corsResponse.(*s3.GetBucketCorsOutput) - if !ok || output == nil { - if d.IsNewResource() { - return diag.Errorf("reading S3 bucket CORS configuration (%s): empty output", d.Id()) - } - log.Printf("[WARN] S3 Bucket CORS Configuration (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil + return diag.Errorf("reading S3 Bucket CORS Configuration (%s): %s", d.Id(), err) } d.Set("bucket", bucket) - d.Set("expected_bucket_owner", expectedBucketOwner) - - if err := d.Set("cors_rule", flattenBucketCorsConfigurationCorsRules(output.CORSRules)); err != nil { + if err := d.Set("cors_rule", flattenCORSRules(corsRules)); err != nil { return diag.Errorf("setting cors_rule: %s", err) } + d.Set("expected_bucket_owner", expectedBucketOwner) return nil } func resourceBucketCorsConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -176,26 +163,25 @@ func resourceBucketCorsConfigurationUpdate(ctx context.Context, d *schema.Resour input := &s3.PutBucketCorsInput{ Bucket: aws.String(bucket), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: expandBucketCorsConfigurationCorsRules(d.Get("cors_rule").(*schema.Set).List()), + CORSConfiguration: &types.CORSConfiguration{ + CORSRules: expandCORSRules(d.Get("cors_rule").(*schema.Set).List()), }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.PutBucketCorsWithContext(ctx, input) + _, err = conn.PutBucketCors(ctx, input) if err != nil { - return diag.Errorf("updating S3 bucket CORS configuration (%s): %s", d.Id(), err) + return diag.Errorf("updating S3 Bucket CORS Configuration (%s): %s", d.Id(), err) } return resourceBucketCorsConfigurationRead(ctx, d, meta) } func resourceBucketCorsConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -205,30 +191,37 @@ func resourceBucketCorsConfigurationDelete(ctx context.Context, d *schema.Resour input := &s3.DeleteBucketCorsInput{ Bucket: aws.String(bucket), } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.DeleteBucketCorsWithContext(ctx, input) + _, err = conn.DeleteBucketCors(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchCORSConfiguration) { return nil } if err != nil { - return diag.Errorf("deleting S3 bucket CORS configuration (%s): %s", d.Id(), err) + return diag.Errorf("deleting S3 Bucket CORS Configuration (%s): %s", d.Id(), err) + } + + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findCORSRules(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket CORS Configuration (%s) delete: %s", d.Id(), err) } return nil } -func expandBucketCorsConfigurationCorsRules(l []interface{}) []*s3.CORSRule { +func expandCORSRules(l []interface{}) []types.CORSRule { if len(l) == 0 { return nil } - var rules 
[]*s3.CORSRule + var rules []types.CORSRule for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -236,22 +229,22 @@ func expandBucketCorsConfigurationCorsRules(l []interface{}) []*s3.CORSRule { continue } - rule := &s3.CORSRule{} + rule := types.CORSRule{} if v, ok := tfMap["allowed_headers"].(*schema.Set); ok && v.Len() > 0 { - rule.AllowedHeaders = flex.ExpandStringSet(v) + rule.AllowedHeaders = flex.ExpandStringValueSet(v) } if v, ok := tfMap["allowed_methods"].(*schema.Set); ok && v.Len() > 0 { - rule.AllowedMethods = flex.ExpandStringSet(v) + rule.AllowedMethods = flex.ExpandStringValueSet(v) } if v, ok := tfMap["allowed_origins"].(*schema.Set); ok && v.Len() > 0 { - rule.AllowedOrigins = flex.ExpandStringSet(v) + rule.AllowedOrigins = flex.ExpandStringValueSet(v) } if v, ok := tfMap["expose_headers"].(*schema.Set); ok && v.Len() > 0 { - rule.ExposeHeaders = flex.ExpandStringSet(v) + rule.ExposeHeaders = flex.ExpandStringValueSet(v) } if v, ok := tfMap["id"].(string); ok && v != "" { @@ -259,7 +252,7 @@ func expandBucketCorsConfigurationCorsRules(l []interface{}) []*s3.CORSRule { } if v, ok := tfMap["max_age_seconds"].(int); ok { - rule.MaxAgeSeconds = aws.Int64(int64(v)) + rule.MaxAgeSeconds = int32(v) } rules = append(rules, rule) @@ -268,38 +261,32 @@ func expandBucketCorsConfigurationCorsRules(l []interface{}) []*s3.CORSRule { return rules } -func flattenBucketCorsConfigurationCorsRules(rules []*s3.CORSRule) []interface{} { +func flattenCORSRules(rules []types.CORSRule) []interface{} { var results []interface{} for _, rule := range rules { - if rule == nil { - continue + m := map[string]interface{}{ + "max_age_seconds": rule.MaxAgeSeconds, } - m := make(map[string]interface{}) - if len(rule.AllowedHeaders) > 0 { - m["allowed_headers"] = flex.FlattenStringSet(rule.AllowedHeaders) + m["allowed_headers"] = rule.AllowedHeaders } if len(rule.AllowedMethods) > 0 { - m["allowed_methods"] = flex.FlattenStringSet(rule.AllowedMethods) + m["allowed_methods"] = rule.AllowedMethods } if len(rule.AllowedOrigins) > 0 { - m["allowed_origins"] = flex.FlattenStringSet(rule.AllowedOrigins) + m["allowed_origins"] = rule.AllowedOrigins } if len(rule.ExposeHeaders) > 0 { - m["expose_headers"] = flex.FlattenStringSet(rule.ExposeHeaders) + m["expose_headers"] = rule.ExposeHeaders } if rule.ID != nil { - m["id"] = aws.StringValue(rule.ID) - } - - if rule.MaxAgeSeconds != nil { - m["max_age_seconds"] = aws.Int64Value(rule.MaxAgeSeconds) + m["id"] = aws.ToString(rule.ID) } results = append(results, m) @@ -307,3 +294,31 @@ func flattenBucketCorsConfigurationCorsRules(rules []*s3.CORSRule) []interface{} return results } + +func findCORSRules(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) ([]types.CORSRule, error) { + input := &s3.GetBucketCorsInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketCors(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchCORSConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || len(output.CORSRules) == 0 { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.CORSRules, nil +} diff --git a/internal/service/s3/bucket_cors_configuration_test.go b/internal/service/s3/bucket_cors_configuration_test.go index 176ff0498b4..825ba616c33 100644 --- 
a/internal/service/s3/bucket_cors_configuration_test.go +++ b/internal/service/s3/bucket_cors_configuration_test.go @@ -7,11 +7,7 @@ import ( "context" "fmt" "testing" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -19,23 +15,24 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccS3BucketCorsConfiguration_basic(t *testing.T) { +func TestAccS3BucketCORSConfiguration_basic(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_cors_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketCorsConfigurationDestroy(ctx), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketCORSConfigurationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -55,21 +52,21 @@ func TestAccS3BucketCorsConfiguration_basic(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_disappears(t *testing.T) { +func TestAccS3BucketCORSConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_cors_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketCorsConfigurationDestroy(ctx), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketCORSConfigurationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceBucketCorsConfiguration(), resourceName), ), ExpectNonEmptyPlan: true, @@ -78,22 +75,22 @@ func TestAccS3BucketCorsConfiguration_disappears(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_update(t *testing.T) { +func TestAccS3BucketCORSConfiguration_update(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_cors_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - 
ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketCorsConfigurationDestroy(ctx), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketCORSConfigurationConfig_completeSingleRule(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -109,7 +106,7 @@ func TestAccS3BucketCorsConfiguration_update(t *testing.T) { { Config: testAccBucketCORSConfigurationConfig_multipleRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -131,7 +128,7 @@ func TestAccS3BucketCorsConfiguration_update(t *testing.T) { { Config: testAccBucketCORSConfigurationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ "allowed_methods.#": "1", @@ -143,21 +140,21 @@ func TestAccS3BucketCorsConfiguration_update(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_SingleRule(t *testing.T) { +func TestAccS3BucketCORSConfiguration_SingleRule(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_cors_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketCorsConfigurationDestroy(ctx), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketCORSConfigurationConfig_completeSingleRule(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -185,21 +182,21 @@ func TestAccS3BucketCorsConfiguration_SingleRule(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_MultipleRules(t *testing.T) { +func TestAccS3BucketCORSConfiguration_MultipleRules(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_cors_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckBucketCorsConfigurationDestroy(ctx), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketCORSConfigurationConfig_multipleRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -229,7 +226,7 @@ func TestAccS3BucketCorsConfiguration_MultipleRules(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_migrate_corsRuleNoChange(t *testing.T) { +func TestAccS3BucketCORSConfiguration_migrate_corsRuleNoChange(t *testing.T) { ctx := acctest.Context(t) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketResourceName := "aws_s3_bucket.test" @@ -237,7 +234,7 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleNoChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -256,7 +253,7 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleNoChange(t *testing.T) { { Config: testAccBucketCORSConfigurationConfig_migrateRuleNoChange(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ @@ -272,7 +269,7 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleNoChange(t *testing.T) { }) } -func TestAccS3BucketCorsConfiguration_migrate_corsRuleWithChange(t *testing.T) { +func TestAccS3BucketCORSConfiguration_migrate_corsRuleWithChange(t *testing.T) { ctx := acctest.Context(t) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketResourceName := "aws_s3_bucket.test" @@ -280,7 +277,7 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleWithChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -299,7 +296,7 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleWithChange(t *testing.T) { { Config: testAccBucketCORSConfigurationConfig_migrateRuleChange(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckBucketCorsConfigurationExists(ctx, resourceName), + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), 
			resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"),
			resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{
@@ -314,9 +311,9 @@ func TestAccS3BucketCorsConfiguration_migrate_corsRuleWithChange(t *testing.T) {
 	})
 }
-func testAccCheckBucketCorsConfigurationDestroy(ctx context.Context) resource.TestCheckFunc {
+func testAccCheckBucketCORSConfigurationDestroy(ctx context.Context) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx)
 		for _, rs := range s.RootModule().Resources {
 			if rs.Type != "aws_s3_bucket_cors_configuration" {
@@ -328,72 +325,40 @@ func testAccCheckBucketCorsConfigurationDestroy(ctx context.Context) resource.Te
 				return err
 			}
-			input := &s3.GetBucketCorsInput{
-				Bucket: aws.String(bucket),
-			}
-
-			if expectedBucketOwner != "" {
-				input.ExpectedBucketOwner = aws.String(expectedBucketOwner)
-			}
+			_, err = tfs3.FindCORSRules(ctx, conn, bucket, expectedBucketOwner)
-			output, err := conn.GetBucketCorsWithContext(ctx, input)
-
-			if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, tfs3.ErrCodeNoSuchCORSConfiguration) {
+			if tfresource.NotFound(err) {
 				continue
 			}
 			if err != nil {
-				return fmt.Errorf("error getting S3 Bucket CORS configuration (%s): %w", rs.Primary.ID, err)
+				return err
 			}
-			if output != nil {
-				return fmt.Errorf("S3 Bucket CORS configuration (%s) still exists", rs.Primary.ID)
-			}
+			return fmt.Errorf("S3 Bucket CORS Configuration %s still exists", rs.Primary.ID)
 		}
 		return nil
 	}
 }
-func testAccCheckBucketCorsConfigurationExists(ctx context.Context, resourceName string) resource.TestCheckFunc {
+func testAccCheckBucketCORSConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		rs, ok := s.RootModule().Resources[resourceName]
+		rs, ok := s.RootModule().Resources[n]
 		if !ok {
-			return fmt.Errorf("Not found: %s", resourceName)
-		}
-
-		if rs.Primary.ID == "" {
-			return fmt.Errorf("Resource (%s) ID not set", resourceName)
+			return fmt.Errorf("Not found: %s", n)
 		}
-		conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx)
-
 		bucket, expectedBucketOwner, err := tfs3.ParseResourceID(rs.Primary.ID)
 		if err != nil {
 			return err
 		}
-		input := &s3.GetBucketCorsInput{
-			Bucket: aws.String(bucket),
-		}
-
-		if expectedBucketOwner != "" {
-			input.ExpectedBucketOwner = aws.String(expectedBucketOwner)
-		}
+		conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx)
-		corsResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) {
-			return conn.GetBucketCorsWithContext(ctx, input)
-		}, tfs3.ErrCodeNoSuchCORSConfiguration)
+		_, err = tfs3.FindCORSRules(ctx, conn, bucket, expectedBucketOwner)
-		if err != nil {
-			return fmt.Errorf("error getting S3 Bucket CORS configuration (%s): %w", rs.Primary.ID, err)
-		}
-
-		if output, ok := corsResponse.(*s3.GetBucketCorsOutput); !ok || output == nil || len(output.CORSRules) == 0 {
-			return fmt.Errorf("S3 Bucket CORS configuration (%s) not found", rs.Primary.ID)
-		}
-
-		return nil
+		return err
 	}
 }
diff --git a/internal/service/s3/bucket_metric.go b/internal/service/s3/bucket_metric.go
index 3728aa6672c..11640a96c90 100644
--- a/internal/service/s3/bucket_metric.go
+++ b/internal/service/s3/bucket_metric.go
@@ -48,13 +48,13 @@ func ResourceBucketMetric() *schema.Resource {
 			"prefix": {
 				Type:         schema.TypeString,
 				Optional:     true,
-				AtLeastOneOf:
filterAtLeastOneOfKeys, + AtLeastOneOf: []string{"filter.0.prefix", "filter.0.tags"}, }, "tags": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - AtLeastOneOf: filterAtLeastOneOfKeys, + AtLeastOneOf: []string{"filter.0.prefix", "filter.0.tags"}, }, }, }, diff --git a/internal/service/s3/bucket_object_lock_configuration.go b/internal/service/s3/bucket_object_lock_configuration.go index 4d62e015ba7..1c6c313a4ab 100644 --- a/internal/service/s3/bucket_object_lock_configuration.go +++ b/internal/service/s3/bucket_object_lock_configuration.go @@ -6,16 +6,17 @@ package s3 import ( "context" "log" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -27,6 +28,7 @@ func ResourceBucketObjectLockConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketObjectLockConfigurationRead, UpdateWithoutTimeout: resourceBucketObjectLockConfigurationUpdate, DeleteWithoutTimeout: resourceBucketObjectLockConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -45,11 +47,11 @@ func ResourceBucketObjectLockConfiguration() *schema.Resource { ValidateFunc: verify.ValidAccountID, }, "object_lock_enabled": { - Type: schema.TypeString, - Optional: true, - Default: s3.ObjectLockEnabledEnabled, - ForceNew: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockEnabled_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: types.ObjectLockEnabledEnabled, + ValidateDiagFunc: enum.Validate[types.ObjectLockEnabled](), }, "rule": { Type: schema.TypeList, @@ -69,9 +71,9 @@ func ResourceBucketObjectLockConfiguration() *schema.Resource { ConflictsWith: []string{"rule.0.default_retention.0.years"}, }, "mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockRetentionMode_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ObjectLockRetentionMode](), }, "years": { Type: schema.TypeInt, @@ -94,55 +96,61 @@ func ResourceBucketObjectLockConfiguration() *schema.Resource { } func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) expectedBucketOwner := d.Get("expected_bucket_owner").(string) input := &s3.PutObjectLockConfigurationInput{ Bucket: aws.String(bucket), - ObjectLockConfiguration: &s3.ObjectLockConfiguration{ + ObjectLockConfiguration: &types.ObjectLockConfiguration{ // ObjectLockEnabled is required by the API, even if configured directly on the S3 bucket // during creation, else a MalformedXML error will be 
returned. - ObjectLockEnabled: aws.String(d.Get("object_lock_enabled").(string)), + ObjectLockEnabled: types.ObjectLockEnabled(d.Get("object_lock_enabled").(string)), Rule: expandBucketObjectLockConfigurationRule(d.Get("rule").([]interface{})), }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } if v, ok := d.GetOk("request_payer"); ok { - input.RequestPayer = aws.String(v.(string)) + input.RequestPayer = types.RequestPayer(v.(string)) } if v, ok := d.GetOk("token"); ok { input.Token = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.PutObjectLockConfigurationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutObjectLockConfiguration(ctx, input) + }, errCodeNoSuchBucket) if err != nil { - return diag.Errorf("creating S3 Bucket (%s) Object Lock configuration: %s", bucket, err) + return diag.Errorf("creating S3 Bucket (%s) Object Lock Configuration: %s", bucket, err) } d.SetId(CreateResourceID(bucket, expectedBucketOwner)) + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Object Lock Configuration (%s) create: %s", d.Id(), err) + } + return resourceBucketObjectLockConfigurationRead(ctx, d, meta) } func resourceBucketObjectLockConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } - objLockConfig, err := FindObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) + objLockConfig, err := findObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Object Lock Configuration (%s) not found, removing from state", d.Id()) @@ -165,37 +173,35 @@ func resourceBucketObjectLockConfigurationRead(ctx context.Context, d *schema.Re } func resourceBucketObjectLockConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } input := &s3.PutObjectLockConfigurationInput{ Bucket: aws.String(bucket), - ObjectLockConfiguration: &s3.ObjectLockConfiguration{ + ObjectLockConfiguration: &types.ObjectLockConfiguration{ // ObjectLockEnabled is required by the API, even if configured directly on the S3 bucket // during creation, else a MalformedXML error will be returned. 
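// In the AWS SDK for Go v2, ObjectLockEnabled is the string-backed enum type
// types.ObjectLockEnabled rather than a *string, so the aws.String wrapper on
// the removed line below becomes a plain type conversion on its replacement.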
- ObjectLockEnabled: aws.String(d.Get("object_lock_enabled").(string)), + ObjectLockEnabled: types.ObjectLockEnabled(d.Get("object_lock_enabled").(string)), Rule: expandBucketObjectLockConfigurationRule(d.Get("rule").([]interface{})), }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } if v, ok := d.GetOk("request_payer"); ok { - input.RequestPayer = aws.String(v.(string)) + input.RequestPayer = types.RequestPayer(v.(string)) } if v, ok := d.GetOk("token"); ok { input.Token = aws.String(v.(string)) } - _, err = conn.PutObjectLockConfigurationWithContext(ctx, input) + _, err = conn.PutObjectLockConfiguration(ctx, input) if err != nil { return diag.Errorf("updating S3 Bucket Object Lock Configuration (%s): %s", d.Id(), err) @@ -205,30 +211,32 @@ func resourceBucketObjectLockConfigurationUpdate(ctx context.Context, d *schema. } func resourceBucketObjectLockConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } input := &s3.PutObjectLockConfigurationInput{ Bucket: aws.String(bucket), - ObjectLockConfiguration: &s3.ObjectLockConfiguration{ + ObjectLockConfiguration: &types.ObjectLockConfiguration{ // ObjectLockEnabled is required by the API, even if configured directly on the S3 bucket // during creation, else a MalformedXML error will be returned. - ObjectLockEnabled: aws.String(d.Get("object_lock_enabled").(string)), + ObjectLockEnabled: types.ObjectLockEnabled(d.Get("object_lock_enabled").(string)), }, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.PutObjectLockConfigurationWithContext(ctx, input) + if v, ok := d.GetOk("request_payer"); ok { + input.RequestPayer = types.RequestPayer(v.(string)) + } - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeContains(err, errCodeObjectLockConfigurationNotFound) { + _, err = conn.PutObjectLockConfiguration(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeObjectLockConfigurationNotFoundError) { return nil } @@ -236,10 +244,18 @@ func resourceBucketObjectLockConfigurationDelete(ctx context.Context, d *schema. 
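// Delete for this resource is another PutObjectLockConfiguration call that
// keeps ObjectLockEnabled but omits Rule: Object Lock cannot be disabled on a
// bucket once it has been enabled, so destroying the resource only clears the
// default retention rule.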
return diag.Errorf("deleting S3 Bucket Object Lock Configuration (%s): %s", d.Id(), err) } + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Object Lock Configuration (%s) delete: %s", d.Id(), err) + } + return nil } -func FindObjectLockConfiguration(ctx context.Context, conn *s3.S3, bucket, expectedBucketOwner string) (*s3.ObjectLockConfiguration, error) { +func findObjectLockConfiguration(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*types.ObjectLockConfiguration, error) { input := &s3.GetObjectLockConfigurationInput{ Bucket: aws.String(bucket), } @@ -247,9 +263,9 @@ func FindObjectLockConfiguration(ctx context.Context, conn *s3.S3, bucket, expec input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - output, err := conn.GetObjectLockConfigurationWithContext(ctx, input) + output, err := conn.GetObjectLockConfiguration(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeContains(err, errCodeObjectLockConfigurationNotFound) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeObjectLockConfigurationNotFoundError) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -267,7 +283,7 @@ func FindObjectLockConfiguration(ctx context.Context, conn *s3.S3, bucket, expec return output.ObjectLockConfiguration, nil } -func expandBucketObjectLockConfigurationRule(l []interface{}) *s3.ObjectLockRule { +func expandBucketObjectLockConfigurationRule(l []interface{}) *types.ObjectLockRule { if len(l) == 0 || l[0] == nil { return nil } @@ -277,7 +293,7 @@ func expandBucketObjectLockConfigurationRule(l []interface{}) *s3.ObjectLockRule return nil } - rule := &s3.ObjectLockRule{} + rule := &types.ObjectLockRule{} if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rule.DefaultRetention = expandBucketObjectLockConfigurationCorsRuleDefaultRetention(v) @@ -286,7 +302,7 @@ func expandBucketObjectLockConfigurationRule(l []interface{}) *s3.ObjectLockRule return rule } -func expandBucketObjectLockConfigurationCorsRuleDefaultRetention(l []interface{}) *s3.DefaultRetention { +func expandBucketObjectLockConfigurationCorsRuleDefaultRetention(l []interface{}) *types.DefaultRetention { if len(l) == 0 || l[0] == nil { return nil } @@ -296,24 +312,24 @@ func expandBucketObjectLockConfigurationCorsRuleDefaultRetention(l []interface{} return nil } - dr := &s3.DefaultRetention{} + dr := &types.DefaultRetention{} if v, ok := tfMap["days"].(int); ok && v > 0 { - dr.Days = aws.Int64(int64(v)) + dr.Days = int32(v) } if v, ok := tfMap["mode"].(string); ok && v != "" { - dr.Mode = aws.String(v) + dr.Mode = types.ObjectLockRetentionMode(v) } if v, ok := tfMap["years"].(int); ok && v > 0 { - dr.Years = aws.Int64(int64(v)) + dr.Years = int32(v) } return dr } -func flattenBucketObjectLockConfigurationRule(rule *s3.ObjectLockRule) []interface{} { +func flattenBucketObjectLockConfigurationRule(rule *types.ObjectLockRule) []interface{} { if rule == nil { return []interface{}{} } @@ -323,26 +339,19 @@ func flattenBucketObjectLockConfigurationRule(rule *s3.ObjectLockRule) []interfa if rule.DefaultRetention != nil { m["default_retention"] = flattenBucketObjectLockConfigurationRuleDefaultRetention(rule.DefaultRetention) } + return []interface{}{m} } -func 
flattenBucketObjectLockConfigurationRuleDefaultRetention(dr *s3.DefaultRetention) []interface{} { +func flattenBucketObjectLockConfigurationRuleDefaultRetention(dr *types.DefaultRetention) []interface{} { if dr == nil { return []interface{}{} } - m := make(map[string]interface{}) - - if dr.Days != nil { - m["days"] = int(aws.Int64Value(dr.Days)) - } - - if dr.Mode != nil { - m["mode"] = aws.StringValue(dr.Mode) - } - - if dr.Years != nil { - m["years"] = int(aws.Int64Value(dr.Years)) + m := map[string]interface{}{ + "days": dr.Days, + "mode": dr.Mode, + "years": dr.Years, } return []interface{}{m} diff --git a/internal/service/s3/bucket_object_lock_configuration_test.go b/internal/service/s3/bucket_object_lock_configuration_test.go index 180d043f292..615868a3031 100644 --- a/internal/service/s3/bucket_object_lock_configuration_test.go +++ b/internal/service/s3/bucket_object_lock_configuration_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketObjectLockConfiguration_basic(t *testing.T) { @@ -25,7 +26,7 @@ func TestAccS3BucketObjectLockConfiguration_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -33,11 +34,11 @@ func TestAccS3BucketObjectLockConfiguration_basic(t *testing.T) { Config: testAccBucketObjectLockConfigurationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketObjectLockConfigurationExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.days", "3"), - resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", string(types.ObjectLockRetentionModeCompliance)), ), }, { @@ -56,7 +57,7 @@ func TestAccS3BucketObjectLockConfiguration_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -79,7 +80,7 @@ func TestAccS3BucketObjectLockConfiguration_update(t *testing.T) { resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -92,11 +93,11 @@ func TestAccS3BucketObjectLockConfiguration_update(t *testing.T) { { Config: testAccBucketObjectLockConfigurationConfig_update(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.years", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeGovernance), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", string(types.ObjectLockRetentionModeGovernance)), ), }, { @@ -116,7 +117,7 @@ func TestAccS3BucketObjectLockConfiguration_migrate_noChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -125,9 +126,9 @@ func TestAccS3BucketObjectLockConfiguration_migrate_noChange(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.#", "1"), - resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.0.default_retention.0.mode", string(types.ObjectLockRetentionModeCompliance)), resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.0.default_retention.0.days", "3"), ), }, @@ -135,11 +136,11 @@ func TestAccS3BucketObjectLockConfiguration_migrate_noChange(t *testing.T) { Config: testAccBucketObjectLockConfigurationConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketObjectLockConfigurationExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.days", "3"), - resource.TestCheckResourceAttr(resourceName, 
"rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", string(types.ObjectLockRetentionModeCompliance)), ), }, }, @@ -154,7 +155,7 @@ func TestAccS3BucketObjectLockConfiguration_migrate_withChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -163,7 +164,7 @@ func TestAccS3BucketObjectLockConfiguration_migrate_withChange(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.#", "0"), ), }, @@ -171,11 +172,11 @@ func TestAccS3BucketObjectLockConfiguration_migrate_withChange(t *testing.T) { Config: testAccBucketObjectLockConfigurationConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketObjectLockConfigurationExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.days", "3"), - resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", string(types.ObjectLockRetentionModeCompliance)), ), }, }, @@ -189,7 +190,7 @@ func TestAccS3BucketObjectLockConfiguration_noRule(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -197,7 +198,7 @@ func TestAccS3BucketObjectLockConfiguration_noRule(t *testing.T) { Config: testAccBucketObjectLockConfigurationConfig_noRule(rName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketObjectLockConfigurationExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "rule.#", "0"), ), }, @@ -212,7 +213,7 @@ func TestAccS3BucketObjectLockConfiguration_noRule(t *testing.T) { func testAccCheckBucketObjectLockConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := 
acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_object_lock_configuration" { @@ -220,7 +221,6 @@ func testAccCheckBucketObjectLockConfigurationDestroy(ctx context.Context) resou } bucket, expectedBucketOwner, err := tfs3.ParseResourceID(rs.Primary.ID) - if err != nil { return err } @@ -242,24 +242,19 @@ func testAccCheckBucketObjectLockConfigurationDestroy(ctx context.Context) resou } } -func testAccCheckBucketObjectLockConfigurationExists(ctx context.Context, resourceName string) resource.TestCheckFunc { +func testAccCheckBucketObjectLockConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Resource (%s) ID not set", resourceName) + return fmt.Errorf("Not found: %s", n) } bucket, expectedBucketOwner, err := tfs3.ParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) _, err = tfs3.FindObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) @@ -285,7 +280,7 @@ resource "aws_s3_bucket_object_lock_configuration" "test" { } } } -`, bucketName, s3.ObjectLockRetentionModeCompliance) +`, bucketName, types.ObjectLockRetentionModeCompliance) } func testAccBucketObjectLockConfigurationConfig_update(bucketName string) string { @@ -306,7 +301,7 @@ resource "aws_s3_bucket_object_lock_configuration" "test" { } } } -`, bucketName, s3.ObjectLockModeGovernance) +`, bucketName, types.ObjectLockModeGovernance) } func testAccBucketObjectLockConfigurationConfig_noRule(bucketName string) string { diff --git a/internal/service/s3/bucket_policy.go b/internal/service/s3/bucket_policy.go index 9d6de12fc25..32c26742832 100644 --- a/internal/service/s3/bucket_policy.go +++ b/internal/service/s3/bucket_policy.go @@ -79,6 +79,14 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta i if d.IsNewResource() { d.SetId(bucket) + + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketPolicy(ctx, conn, d.Id()) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Policy (%s) create: %s", d.Id(), err) + } } return append(diags, resourceBucketPolicyRead(ctx, d, meta)...) 
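The wait just added to `resourceBucketPolicyPut` is the pattern this change set applies across the S3 resources: one finder per remote object translates the service's "gone" error codes into `*retry.NotFoundError`, Create polls that finder until read-after-write succeeds, and Delete polls until it reports not-found. Below is a minimal sketch of the shape; `findWidget` and its waiters are hypothetical stand-ins (the real counterparts here are `findBucketPolicy`, `findCORSRules`, and `findObjectLockConfiguration`), while `errCodeNoSuchBucket`, `s3BucketPropagationTimeout`, and the `tfresource` helpers are the provider-internal names visible elsewhere in this diff:

```go
// Sketch only, not code from this PR.
package s3

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/hashicorp/aws-sdk-go-base/v2/tfawserr"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

// findWidget (hypothetical) is the single read path for the remote object. It
// converts the service's "gone" error codes into *retry.NotFoundError so that
// callers can test tfresource.NotFound(err) instead of matching codes inline.
func findWidget(ctx context.Context, conn *s3.Client, bucket string) (*s3.GetBucketPolicyOutput, error) {
	output, err := conn.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{Bucket: aws.String(bucket)})

	if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) {
		return nil, &retry.NotFoundError{LastError: err, LastRequest: bucket}
	}

	if err != nil {
		return nil, err
	}

	return output, nil
}

// After Create: poll until the write is visible, since bucket-configuration
// changes can take time to propagate.
func waitWidgetCreated(ctx context.Context, conn *s3.Client, bucket string) error {
	_, err := tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) {
		return findWidget(ctx, conn, bucket)
	})
	return err
}

// After Delete: poll until the finder stops seeing the object.
func waitWidgetDeleted(ctx context.Context, conn *s3.Client, bucket string) error {
	_, err := tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) {
		return findWidget(ctx, conn, bucket)
	})
	return err
}
```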
@@ -97,7 +105,7 @@ func resourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, meta } if err != nil { - return diag.Errorf("reading S3 Bucket (%s) Policy: %s", d.Id(), err) + return diag.Errorf("reading S3 Bucket Policy (%s): %s", d.Id(), err) } policy, err = verify.PolicyToSet(d.Get("policy").(string), policy) @@ -125,7 +133,15 @@ func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, met } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) Policy: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Policy (%s): %s", d.Id(), err) + } + + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketPolicy(ctx, conn, d.Id()) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket Policy (%s) delete: %s", d.Id(), err) } return diags diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index 4d9fa102090..0d499ff8c59 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -7,14 +7,16 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -26,6 +28,7 @@ func ResourceBucketServerSideEncryptionConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketServerSideEncryptionConfigurationRead, UpdateWithoutTimeout: resourceBucketServerSideEncryptionConfigurationUpdate, DeleteWithoutTimeout: resourceBucketServerSideEncryptionConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -59,9 +62,9 @@ func ResourceBucketServerSideEncryptionConfiguration() *schema.Resource { Optional: true, }, "sse_algorithm": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ServerSideEncryption_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ServerSideEncryption](), }, }, }, @@ -78,13 +81,13 @@ func ResourceBucketServerSideEncryptionConfiguration() *schema.Resource { } func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) expectedBucketOwner := d.Get("expected_bucket_owner").(string) input := &s3.PutBucketEncryptionInput{ Bucket: aws.String(bucket), - ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{ + ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ 
Rules: expandBucketServerSideEncryptionConfigurationRules(d.Get("rule").(*schema.Set).List()), }, } @@ -92,13 +95,9 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, - func() (interface{}, error) { - return conn.PutBucketEncryptionWithContext(ctx, input) - }, - s3.ErrCodeNoSuchBucket, - errCodeOperationAborted, - ) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketEncryption(ctx, input) + }, errCodeNoSuchBucket, errCodeOperationAborted) if err != nil { return diag.Errorf("creating S3 Bucket (%s) Server-side Encryption Configuration: %s", bucket, err) @@ -107,7 +106,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, d.SetId(CreateResourceID(bucket, expectedBucketOwner)) _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { - return FindBucketServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) + return findServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) }) if err != nil { @@ -118,14 +117,14 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, } func resourceBucketServerSideEncryptionConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - sse, err := FindBucketServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) + sse, err := findServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Server-side Encryption Configuration (%s) not found, removing from state", d.Id()) @@ -147,7 +146,7 @@ func resourceBucketServerSideEncryptionConfigurationRead(ctx context.Context, d } func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -156,7 +155,7 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, input := &s3.PutBucketEncryptionInput{ Bucket: aws.String(bucket), - ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{ + ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ Rules: expandBucketServerSideEncryptionConfigurationRules(d.Get("rule").(*schema.Set).List()), }, } @@ -164,13 +163,9 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, - func() (interface{}, error) { - return conn.PutBucketEncryptionWithContext(ctx, input) - }, - s3.ErrCodeNoSuchBucket, - errCodeOperationAborted, - ) + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketEncryption(ctx, input) + }, errCodeNoSuchBucket, errCodeOperationAborted) if err != nil { return diag.Errorf("updating S3 
Bucket Server-side Encryption Configuration (%s): %s", d.Id(), err) @@ -180,7 +175,7 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, } func resourceBucketServerSideEncryptionConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -194,9 +189,9 @@ func resourceBucketServerSideEncryptionConfigurationDelete(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.DeleteBucketEncryptionWithContext(ctx, input) + _, err = conn.DeleteBucketEncryption(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, errCodeServerSideEncryptionConfigurationNotFound) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeServerSideEncryptionConfigurationNotFound) { return nil } @@ -204,10 +199,12 @@ func resourceBucketServerSideEncryptionConfigurationDelete(ctx context.Context, return diag.Errorf("deleting S3 Bucket Server-side Encryption Configuration (%s): %s", d.Id(), err) } + // Don't wait for the SSE configuration to disappear as the bucket now always has one. + return nil } -func FindBucketServerSideEncryptionConfiguration(ctx context.Context, conn *s3.S3, bucketName, expectedBucketOwner string) (*s3.ServerSideEncryptionConfiguration, error) { +func findServerSideEncryptionConfiguration(ctx context.Context, conn *s3.Client, bucketName, expectedBucketOwner string) (*types.ServerSideEncryptionConfiguration, error) { input := &s3.GetBucketEncryptionInput{ Bucket: aws.String(bucketName), } @@ -215,9 +212,9 @@ func FindBucketServerSideEncryptionConfiguration(ctx context.Context, conn *s3.S input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - output, err := conn.GetBucketEncryptionWithContext(ctx, input) + output, err := conn.GetBucketEncryption(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, errCodeServerSideEncryptionConfigurationNotFound) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeServerSideEncryptionConfigurationNotFound) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -235,7 +232,7 @@ func FindBucketServerSideEncryptionConfiguration(ctx context.Context, conn *s3.S return output.ServerSideEncryptionConfiguration, nil } -func expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(l []interface{}) *s3.ServerSideEncryptionByDefault { +func expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { if len(l) == 0 || l[0] == nil { return nil } @@ -245,21 +242,21 @@ func expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionB return nil } - sse := &s3.ServerSideEncryptionByDefault{} + sse := &types.ServerSideEncryptionByDefault{} if v, ok := tfMap["kms_master_key_id"].(string); ok && v != "" { sse.KMSMasterKeyID = aws.String(v) } if v, ok := tfMap["sse_algorithm"].(string); ok && v != "" { - sse.SSEAlgorithm = aws.String(v) + sse.SSEAlgorithm = types.ServerSideEncryption(v) } return sse } -func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []*s3.ServerSideEncryptionRule { - var rules []*s3.ServerSideEncryptionRule +func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []types.ServerSideEncryptionRule { + var rules []types.ServerSideEncryptionRule for _, 
tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -267,14 +264,14 @@ func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []*s3.S continue } - rule := &s3.ServerSideEncryptionRule{} + rule := types.ServerSideEncryptionRule{} if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(v) } if v, ok := tfMap["bucket_key_enabled"].(bool); ok { - rule.BucketKeyEnabled = aws.Bool(v) + rule.BucketKeyEnabled = v } rules = append(rules, rule) } @@ -282,22 +279,17 @@ func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []*s3.S return rules } -func flattenBucketServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncryptionRule) []interface{} { +func flattenBucketServerSideEncryptionConfigurationRules(rules []types.ServerSideEncryptionRule) []interface{} { var results []interface{} for _, rule := range rules { - if rule == nil { - continue + m := map[string]interface{}{ + "bucket_key_enabled": rule.BucketKeyEnabled, } - m := make(map[string]interface{}) - if rule.ApplyServerSideEncryptionByDefault != nil { m["apply_server_side_encryption_by_default"] = flattenBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(rule.ApplyServerSideEncryptionByDefault) } - if rule.BucketKeyEnabled != nil { - m["bucket_key_enabled"] = aws.BoolValue(rule.BucketKeyEnabled) - } results = append(results, m) } @@ -305,19 +297,17 @@ func flattenBucketServerSideEncryptionConfigurationRules(rules []*s3.ServerSideE return results } -func flattenBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(sse *s3.ServerSideEncryptionByDefault) []interface{} { +func flattenBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(sse *types.ServerSideEncryptionByDefault) []interface{} { if sse == nil { - return []interface{}{} + return nil } - m := make(map[string]interface{}) - - if sse.KMSMasterKeyID != nil { - m["kms_master_key_id"] = aws.StringValue(sse.KMSMasterKeyID) + m := map[string]interface{}{ + "sse_algorithm": sse.SSEAlgorithm, } - if sse.SSEAlgorithm != nil { - m["sse_algorithm"] = aws.StringValue(sse.SSEAlgorithm) + if sse.KMSMasterKeyID != nil { + m["kms_master_key_id"] = aws.ToString(sse.KMSMasterKeyID) } return []interface{}{m} diff --git a/internal/service/s3/bucket_server_side_encryption_configuration_test.go b/internal/service/s3/bucket_server_side_encryption_configuration_test.go index 796348cbd7b..2460aa58581 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration_test.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -15,6 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketServerSideEncryptionConfiguration_basic(t *testing.T) { @@ -24,7 +26,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_basic(t *testing.T) { resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -55,17 +57,17 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySEEByDefault_AES256(t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAes256), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAes256)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAes256)), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), ), }, @@ -85,17 +87,17 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMS(t *t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAwsKms), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAwsKms)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), ), }, @@ -115,17 +117,17 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMSDSSE( resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: 
[]resource.TestStep{ { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAwsKmsDsse), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAwsKmsDsse)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKmsDsse), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKmsDsse)), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), ), }, @@ -145,17 +147,17 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_UpdateSS resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAwsKms), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAwsKms)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), ), }, { @@ -164,12 +166,12 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_UpdateSS ImportStateVerify: true, }, { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAes256), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAes256)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAes256)), ), }, { @@ -188,7 +190,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMSWithM resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, 
s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -198,7 +200,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMSWithM testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttrPair(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", "aws_kms_key.test", "arn"), ), }, @@ -218,7 +220,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMSWithM resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -228,7 +230,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_KMSWithM testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttrPair(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", "aws_kms_key.test", "id"), ), }, @@ -248,7 +250,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_BucketKeyEnabled(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -291,7 +293,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_BucketKe resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -302,7 +304,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_BucketKe resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "bucket"), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", 
s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttrPair(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "rule.0.bucket_key_enabled", "true"), ), @@ -319,7 +321,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_BucketKe resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3_bucket.test", "bucket"), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttrPair(resourceName, "rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "rule.0.bucket_key_enabled", "false"), ), @@ -341,7 +343,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_migrate_noChange(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ @@ -352,7 +354,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_migrate_noChange(t *testin resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.#", "1"), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.#", "1"), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "false"), ), }, @@ -363,7 +365,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_migrate_noChange(t *testin resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckNoResourceAttr(resourceName, "rule.0.bucket_key_enabled"), ), }, @@ -379,29 +381,29 @@ func 
TestAccS3BucketServerSideEncryptionConfiguration_migrate_withChange(t *test resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_defaultEncryptionDefaultKey(rName, s3.ServerSideEncryptionAwsKms), + Config: testAccBucketConfig_defaultEncryptionDefaultKey(rName, string(types.ServerSideEncryptionAwsKms)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.#", "1"), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.#", "1"), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "false"), ), }, { - Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, s3.ServerSideEncryptionAes256), + Config: testAccBucketServerSideEncryptionConfigurationConfig_applySSEByDefaultSSEAlgorithm(rName, string(types.ServerSideEncryptionAes256)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAes256)), resource.TestCheckNoResourceAttr(resourceName, "rule.0.bucket_key_enabled"), ), }, @@ -421,9 +423,9 @@ func testAccCheckBucketServerSideEncryptionConfigurationExists(ctx context.Conte return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - _, err = tfs3.FindBucketServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) + _, err = tfs3.FindServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) return err } diff --git a/internal/service/s3/bucket_versioning.go b/internal/service/s3/bucket_versioning.go index bd7890d79d7..a3830914332 100644 --- a/internal/service/s3/bucket_versioning.go +++ b/internal/service/s3/bucket_versioning.go @@ -136,6 +136,8 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, d.SetId(CreateResourceID(bucket, expectedBucketOwner)) + // Waiting for the versioning configuration to appear is done in resource Read. 
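+ // GetBucketVersioning can report an empty Status for a short time after Put; the Read waiter treats "" as pending and requires several consecutive non-empty responses before returning.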
+ return resourceBucketVersioningRead(ctx, d, meta) } @@ -143,14 +145,12 @@ func resourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, m conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } - var output *s3.GetBucketVersioningOutput + output, err := waitForBucketVersioningStatus(ctx, conn, bucket, expectedBucketOwner) - output, err = waitForBucketVersioningStatus(ctx, conn, bucket, expectedBucketOwner) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Versioning (%s) not found, removing from state", d.Id()) d.SetId("") @@ -158,7 +158,7 @@ func resourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, m } if err != nil { - return diag.Errorf("getting S3 bucket versioning (%s): %s", d.Id(), err) + return diag.Errorf("reading S3 Bucket Versioning (%s): %s", d.Id(), err) } d.Set("bucket", bucket) @@ -239,6 +239,8 @@ func resourceBucketVersioningDelete(ctx context.Context, d *schema.ResourceData, return diag.Errorf("deleting S3 Bucket Versioning (%s): %s", d.Id(), err) } + // Don't wait for the versioning configuration to disappear as it still exists after suspension. + return nil } @@ -333,14 +335,11 @@ func statusBucketVersioning(ctx context.Context, conn *s3.Client, bucket, expect } func waitForBucketVersioningStatus(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketVersioningOutput, error) { - const ( - timeout = 1 * time.Minute - ) stateConf := &retry.StateChangeConf{ Pending: []string{""}, Target: bucketVersioningStatus_Values(), Refresh: statusBucketVersioning(ctx, conn, bucket, expectedBucketOwner), - Timeout: timeout, + Timeout: s3BucketPropagationTimeout, ContinuousTargetOccurence: 3, NotFoundChecks: 3, Delay: 1 * time.Second, diff --git a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index 40d87e7bfba..48e80a6b57c 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ b/internal/service/s3/bucket_website_configuration.go @@ -6,18 +6,19 @@ package s3 import ( "context" "encoding/json" - "fmt" "log" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -29,6 +30,7 @@ func ResourceBucketWebsiteConfiguration() *schema.Resource { ReadWithoutTimeout: resourceBucketWebsiteConfigurationRead, UpdateWithoutTimeout: resourceBucketWebsiteConfigurationUpdate, DeleteWithoutTimeout: resourceBucketWebsiteConfigurationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -89,9 +91,9 @@ func ResourceBucketWebsiteConfiguration() *schema.Resource { Required: 
true, }, "protocol": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.Protocol_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.Protocol](), }, }, }, @@ -135,9 +137,9 @@ func ResourceBucketWebsiteConfiguration() *schema.Resource { Optional: true, }, "protocol": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.Protocol_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.Protocol](), }, "replace_key_prefix_with": { Type: schema.TypeString, @@ -164,11 +166,11 @@ func ResourceBucketWebsiteConfiguration() *schema.Resource { return json }, }, - "website_endpoint": { + "website_domain": { Type: schema.TypeString, Computed: true, }, - "website_domain": { + "website_endpoint": { Type: schema.TypeString, Computed: true, }, @@ -177,12 +179,9 @@ func ResourceBucketWebsiteConfiguration() *schema.Resource { } func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - bucket := d.Get("bucket").(string) - expectedBucketOwner := d.Get("expected_bucket_owner").(string) - - websiteConfig := &s3.WebsiteConfiguration{} + websiteConfig := &types.WebsiteConfiguration{} if v, ok := d.GetOk("error_document"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { websiteConfig.ErrorDocument = expandBucketWebsiteConfigurationErrorDocument(v.([]interface{})) @@ -201,120 +200,108 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res } if v, ok := d.GetOk("routing_rules"); ok { - var unmarshalledRules []*s3.RoutingRule + var unmarshalledRules []types.RoutingRule if err := json.Unmarshal([]byte(v.(string)), &unmarshalledRules); err != nil { - return diag.Errorf("creating S3 Bucket (%s) website configuration: %s", bucket, err) + return diag.FromErr(err) } websiteConfig.RoutingRules = unmarshalledRules } + bucket := d.Get("bucket").(string) + expectedBucketOwner := d.Get("expected_bucket_owner").(string) input := &s3.PutBucketWebsiteInput{ Bucket: aws.String(bucket), WebsiteConfiguration: websiteConfig, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.PutBucketWebsiteWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return conn.PutBucketWebsite(ctx, input) + }, errCodeNoSuchBucket) if err != nil { - return diag.Errorf("creating S3 bucket (%s) website configuration: %s", bucket, err) + return diag.Errorf("creating S3 Bucket (%s) Website Configuration: %s", bucket, err) } d.SetId(CreateResourceID(bucket, expectedBucketOwner)) + _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + return diag.Errorf("waiting for S3 Bucket Website Configuration (%s) create: %s", d.Id(), err) + } + return resourceBucketWebsiteConfigurationRead(ctx, d, meta) } func resourceBucketWebsiteConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn :=
meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - input := &s3.GetBucketWebsiteInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketWebsiteWithContext(ctx, input) + output, err := findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, ErrCodeNoSuchWebsiteConfiguration) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket Website Configuration (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if output == nil { - if d.IsNewResource() { - return diag.Errorf("reading S3 bucket website configuration (%s): empty output", d.Id()) - } - log.Printf("[WARN] S3 Bucket Website Configuration (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil + if err != nil { + return diag.Errorf("reading S3 Bucket Website Configuration (%s): %s", d.Id(), err) } d.Set("bucket", bucket) - d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("error_document", flattenBucketWebsiteConfigurationErrorDocument(output.ErrorDocument)); err != nil { return diag.Errorf("setting error_document: %s", err) } - + d.Set("expected_bucket_owner", expectedBucketOwner) if err := d.Set("index_document", flattenBucketWebsiteConfigurationIndexDocument(output.IndexDocument)); err != nil { return diag.Errorf("setting index_document: %s", err) } - if err := d.Set("redirect_all_requests_to", flattenBucketWebsiteConfigurationRedirectAllRequestsTo(output.RedirectAllRequestsTo)); err != nil { return diag.Errorf("setting redirect_all_requests_to: %s", err) } - if err := d.Set("routing_rule", flattenBucketWebsiteConfigurationRoutingRules(output.RoutingRules)); err != nil { return diag.Errorf("setting routing_rule: %s", err) } - if output.RoutingRules != nil { - rr, err := normalizeRoutingRules(output.RoutingRules) + rr, err := normalizeRoutingRulesV2(output.RoutingRules) if err != nil { - return diag.Errorf("while marshaling routing rules: %s", err) + return diag.FromErr(err) } d.Set("routing_rules", rr) } else { d.Set("routing_rules", nil) } - // Add website_endpoint and website_domain as attributes - websiteEndpoint, err := resourceBucketWebsiteConfigurationWebsiteEndpoint(ctx, meta.(*conns.AWSClient), bucket, expectedBucketOwner) - if err != nil { - return diag.FromErr(err) - } - - if websiteEndpoint != nil { - d.Set("website_endpoint", websiteEndpoint.Endpoint) - d.Set("website_domain", websiteEndpoint.Domain) + if output, err := findBucketLocation(ctx, conn, bucket, expectedBucketOwner); err != nil { + return diag.Errorf("reading S3 Bucket (%s) Location: %s", d.Id(), err) + } else { + website := WebsiteEndpoint(meta.(*conns.AWSClient), bucket, string(output.LocationConstraint)) + d.Set("website_domain", website.Domain) + d.Set("website_endpoint", website.Endpoint) } return nil } func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { return diag.FromErr(err) } - websiteConfig := &s3.WebsiteConfiguration{} + websiteConfig := &types.WebsiteConfiguration{} if v, ok := d.GetOk("error_document"); ok && len(v.([]interface{})) > 0 
&& v.([]interface{})[0] != nil { websiteConfig.ErrorDocument = expandBucketWebsiteConfigurationErrorDocument(v.([]interface{})) @@ -332,9 +319,9 @@ func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.Res if d.HasChange("routing_rule") { websiteConfig.RoutingRules = expandBucketWebsiteConfigurationRoutingRules(d.Get("routing_rule").([]interface{})) } else { - var unmarshalledRules []*s3.RoutingRule + var unmarshalledRules []types.RoutingRule if err := json.Unmarshal([]byte(d.Get("routing_rules").(string)), &unmarshalledRules); err != nil { - return diag.Errorf("updating S3 Bucket (%s) website configuration: %s", bucket, err) + return diag.FromErr(err) } websiteConfig.RoutingRules = unmarshalledRules } @@ -345,9 +332,9 @@ func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.Res } if v, ok := d.GetOk("routing_rules"); ok { - var unmarshalledRules []*s3.RoutingRule + var unmarshalledRules []types.RoutingRule if err := json.Unmarshal([]byte(v.(string)), &unmarshalledRules); err != nil { - return diag.Errorf("updating S3 Bucket (%s) website configuration: %s", bucket, err) + return diag.FromErr(err) } websiteConfig.RoutingRules = unmarshalledRules } @@ -357,22 +344,21 @@ func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.Res Bucket: aws.String(bucket), WebsiteConfiguration: websiteConfig, } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.PutBucketWebsiteWithContext(ctx, input) + _, err = conn.PutBucketWebsite(ctx, input) if err != nil { - return diag.Errorf("updating S3 bucket website configuration (%s): %s", d.Id(), err) + return diag.Errorf("updating S3 Bucket Website Configuration (%s): %s", d.Id(), err) } return resourceBucketWebsiteConfigurationRead(ctx, d, meta) } func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := ParseResourceID(d.Id()) if err != nil { @@ -382,49 +368,32 @@ func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.Res input := &s3.DeleteBucketWebsiteInput{ Bucket: aws.String(bucket), } - if expectedBucketOwner != "" { input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = conn.DeleteBucketWebsiteWithContext(ctx, input) + _, err = conn.DeleteBucketWebsite(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, ErrCodeNoSuchWebsiteConfiguration) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchWebsiteConfiguration) { return nil } if err != nil { - return diag.Errorf("deleting S3 bucket website configuration (%s): %s", d.Id(), err) - } - - return nil -} - -func resourceBucketWebsiteConfigurationWebsiteEndpoint(ctx context.Context, client *conns.AWSClient, bucket, expectedBucketOwner string) (*S3Website, error) { - conn := client.S3Conn(ctx) - - input := &s3.GetBucketLocationInput{ - Bucket: aws.String(bucket), + return diag.Errorf("deleting S3 Bucket Website Configuration (%s): %s", d.Id(), err) } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } + _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) + }) - output, err := conn.GetBucketLocationWithContext(ctx, input) if err != nil { - return nil, 
fmt.Errorf("getting S3 Bucket (%s) Location: %w", bucket, err) - } - - var region string - if output.LocationConstraint != nil { - region = aws.StringValue(output.LocationConstraint) + return diag.Errorf("waiting for S3 Bucket Accelerate Configuration (%s) delete: %s", d.Id(), err) } - return WebsiteEndpoint(client, bucket, region), nil + return nil } -func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *s3.ErrorDocument { +func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *types.ErrorDocument { if len(l) == 0 || l[0] == nil { return nil } @@ -434,7 +403,7 @@ func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *s3.ErrorDoc return nil } - result := &s3.ErrorDocument{} + result := &types.ErrorDocument{} if v, ok := tfMap["key"].(string); ok && v != "" { result.Key = aws.String(v) @@ -443,7 +412,7 @@ func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *s3.ErrorDoc return result } -func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *s3.IndexDocument { +func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *types.IndexDocument { if len(l) == 0 || l[0] == nil { return nil } @@ -453,7 +422,7 @@ func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *s3.IndexDoc return nil } - result := &s3.IndexDocument{} + result := &types.IndexDocument{} if v, ok := tfMap["suffix"].(string); ok && v != "" { result.Suffix = aws.String(v) @@ -462,7 +431,7 @@ func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *s3.IndexDoc return result } -func expandBucketWebsiteConfigurationRedirectAllRequestsTo(l []interface{}) *s3.RedirectAllRequestsTo { +func expandBucketWebsiteConfigurationRedirectAllRequestsTo(l []interface{}) *types.RedirectAllRequestsTo { if len(l) == 0 || l[0] == nil { return nil } @@ -472,21 +441,21 @@ func expandBucketWebsiteConfigurationRedirectAllRequestsTo(l []interface{}) *s3. 
return nil } - result := &s3.RedirectAllRequestsTo{} + result := &types.RedirectAllRequestsTo{} if v, ok := tfMap["host_name"].(string); ok && v != "" { result.HostName = aws.String(v) } if v, ok := tfMap["protocol"].(string); ok && v != "" { - result.Protocol = aws.String(v) + result.Protocol = types.Protocol(v) } return result } -func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []*s3.RoutingRule { - var results []*s3.RoutingRule +func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []types.RoutingRule { + var results []types.RoutingRule for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -494,7 +463,7 @@ func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []*s3.Routing continue } - rule := &s3.RoutingRule{} + rule := types.RoutingRule{} if v, ok := tfMap["condition"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rule.Condition = expandBucketWebsiteConfigurationRoutingRuleCondition(v) @@ -510,7 +479,7 @@ func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []*s3.Routing return results } -func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *s3.Condition { +func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *types.Condition { if len(l) == 0 || l[0] == nil { return nil } @@ -520,7 +489,7 @@ func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *s3.C return nil } - result := &s3.Condition{} + result := &types.Condition{} if v, ok := tfMap["http_error_code_returned_equals"].(string); ok && v != "" { result.HttpErrorCodeReturnedEquals = aws.String(v) @@ -533,7 +502,7 @@ func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *s3.C return result } -func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *s3.Redirect { +func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *types.Redirect { if len(l) == 0 || l[0] == nil { return nil } @@ -543,7 +512,7 @@ func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *s3.Re return nil } - result := &s3.Redirect{} + result := &types.Redirect{} if v, ok := tfMap["host_name"].(string); ok && v != "" { result.HostName = aws.String(v) @@ -554,7 +523,7 @@ func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *s3.Re } if v, ok := tfMap["protocol"].(string); ok && v != "" { - result.Protocol = aws.String(v) + result.Protocol = types.Protocol(v) } if v, ok := tfMap["replace_key_prefix_with"].(string); ok && v != "" { @@ -568,7 +537,7 @@ func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *s3.Re return result } -func flattenBucketWebsiteConfigurationIndexDocument(i *s3.IndexDocument) []interface{} { +func flattenBucketWebsiteConfigurationIndexDocument(i *types.IndexDocument) []interface{} { if i == nil { return []interface{}{} } @@ -576,13 +545,13 @@ func flattenBucketWebsiteConfigurationIndexDocument(i *s3.IndexDocument) []inter m := make(map[string]interface{}) if i.Suffix != nil { - m["suffix"] = aws.StringValue(i.Suffix) + m["suffix"] = aws.ToString(i.Suffix) } return []interface{}{m} } -func flattenBucketWebsiteConfigurationErrorDocument(e *s3.ErrorDocument) []interface{} { +func flattenBucketWebsiteConfigurationErrorDocument(e *types.ErrorDocument) []interface{} { if e == nil { return []interface{}{} } @@ -590,38 +559,32 @@ func flattenBucketWebsiteConfigurationErrorDocument(e *s3.ErrorDocument) []inter m := make(map[string]interface{}) if e.Key != nil { - m["key"] = 
aws.StringValue(e.Key) + m["key"] = aws.ToString(e.Key) } return []interface{}{m} } -func flattenBucketWebsiteConfigurationRedirectAllRequestsTo(r *s3.RedirectAllRequestsTo) []interface{} { +func flattenBucketWebsiteConfigurationRedirectAllRequestsTo(r *types.RedirectAllRequestsTo) []interface{} { if r == nil { return []interface{}{} } - m := make(map[string]interface{}) - - if r.HostName != nil { - m["host_name"] = aws.StringValue(r.HostName) + m := map[string]interface{}{ + "protocol": string(r.Protocol), } - if r.Protocol != nil { - m["protocol"] = aws.StringValue(r.Protocol) + if r.HostName != nil { + m["host_name"] = aws.ToString(r.HostName) } return []interface{}{m} } -func flattenBucketWebsiteConfigurationRoutingRules(rules []*s3.RoutingRule) []interface{} { +func flattenBucketWebsiteConfigurationRoutingRules(rules []types.RoutingRule) []interface{} { var results []interface{} for _, rule := range rules { - if rule == nil { - continue - } - m := make(map[string]interface{}) if rule.Condition != nil { @@ -638,7 +601,7 @@ func flattenBucketWebsiteConfigurationRoutingRules(rules []*s3.RoutingRule) []in return results } -func flattenBucketWebsiteConfigurationRoutingRuleCondition(c *s3.Condition) []interface{} { +func flattenBucketWebsiteConfigurationRoutingRuleCondition(c *types.Condition) []interface{} { if c == nil { return []interface{}{} } @@ -646,42 +609,147 @@ func flattenBucketWebsiteConfigurationRoutingRuleCondition(c *s3.Condition) []in m := make(map[string]interface{}) if c.KeyPrefixEquals != nil { - m["key_prefix_equals"] = aws.StringValue(c.KeyPrefixEquals) + m["key_prefix_equals"] = aws.ToString(c.KeyPrefixEquals) } if c.HttpErrorCodeReturnedEquals != nil { - m["http_error_code_returned_equals"] = aws.StringValue(c.HttpErrorCodeReturnedEquals) + m["http_error_code_returned_equals"] = aws.ToString(c.HttpErrorCodeReturnedEquals) } return []interface{}{m} } -func flattenBucketWebsiteConfigurationRoutingRuleRedirect(r *s3.Redirect) []interface{} { +func flattenBucketWebsiteConfigurationRoutingRuleRedirect(r *types.Redirect) []interface{} { if r == nil { return []interface{}{} } - m := make(map[string]interface{}) + m := map[string]interface{}{ + "protocol": string(r.Protocol), + } if r.HostName != nil { - m["host_name"] = aws.StringValue(r.HostName) + m["host_name"] = aws.ToString(r.HostName) } if r.HttpRedirectCode != nil { - m["http_redirect_code"] = aws.StringValue(r.HttpRedirectCode) - } - - if r.Protocol != nil { - m["protocol"] = aws.StringValue(r.Protocol) + m["http_redirect_code"] = aws.ToString(r.HttpRedirectCode) } if r.ReplaceKeyWith != nil { - m["replace_key_with"] = aws.StringValue(r.ReplaceKeyWith) + m["replace_key_with"] = aws.ToString(r.ReplaceKeyWith) } if r.ReplaceKeyPrefixWith != nil { - m["replace_key_prefix_with"] = aws.StringValue(r.ReplaceKeyPrefixWith) + m["replace_key_prefix_with"] = aws.ToString(r.ReplaceKeyPrefixWith) } return []interface{}{m} } + +func normalizeRoutingRulesV2(w []types.RoutingRule) (string, error) { + withNulls, err := json.Marshal(w) + if err != nil { + return "", err + } + + var rules []map[string]interface{} + if err := json.Unmarshal(withNulls, &rules); err != nil { + return "", err + } + + var cleanRules []map[string]interface{} + for _, rule := range rules { + cleanRules = append(cleanRules, removeNilOrEmptyProtocol(rule)) + } + + withoutNulls, err := json.Marshal(cleanRules) + if err != nil { + return "", err + } + + return string(withoutNulls), nil +} + +// removeNilOrEmptyProtocol removes nils and empty ("") Protocol values 
from a RoutingRule JSON document. +func removeNilOrEmptyProtocol(data map[string]interface{}) map[string]interface{} { + withoutNil := make(map[string]interface{}) + + for k, v := range data { + if v == nil { + continue + } + + switch v := v.(type) { + case map[string]interface{}: + withoutNil[k] = removeNilOrEmptyProtocol(v) + case string: + // With AWS SDK for Go v2 Protocol changed type from *string to types.Protocol. + // An empty ("") value is equivalent to nil. + if k == "Protocol" && v == "" { + continue + } + withoutNil[k] = v + default: + withoutNil[k] = v + } + } + + return withoutNil +} + +func findBucketWebsite(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketWebsiteOutput, error) { + input := &s3.GetBucketWebsiteInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketWebsite(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchWebsiteConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findBucketLocation(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketLocationOutput, error) { + input := &s3.GetBucketLocationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketLocation(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/s3/bucket_website_configuration_test.go b/internal/service/s3/bucket_website_configuration_test.go index 1020b1861d6..103d2549382 100644 --- a/internal/service/s3/bucket_website_configuration_test.go +++ b/internal/service/s3/bucket_website_configuration_test.go @@ -8,15 +8,15 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3BucketWebsiteConfiguration_basic(t *testing.T) { @@ -26,7 +26,7 @@ func TestAccS3BucketWebsiteConfiguration_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -57,7 +57,7 @@ func 
TestAccS3BucketWebsiteConfiguration_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -80,7 +80,7 @@ func TestAccS3BucketWebsiteConfiguration_update(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -117,7 +117,7 @@ func TestAccS3BucketWebsiteConfiguration_Redirect(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -146,7 +146,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRule_ConditionAndRedirect(t *tes resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -218,7 +218,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRule_MultipleRules(t *testing.T) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -264,7 +264,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRule_RedirectOnly(t *testing.T) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -275,7 +275,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRule_RedirectOnly(t *testing.T) resource.TestCheckResourceAttr(resourceName, "routing_rule.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "routing_rule.*", map[string]string{ "redirect.#": "1", - "redirect.0.protocol": s3.ProtocolHttps, + "redirect.0.protocol": string(types.ProtocolHttps), "redirect.0.replace_key_with": "errorpage.html", }), resource.TestCheckResourceAttrSet(resourceName, "routing_rules"), @@ -297,7 +297,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRules_ConditionAndRedirect(t *te resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
@@ -297,7 +297,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRules_ConditionAndRedirect(t *te
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketWebsiteConfigurationDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -325,7 +325,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRules_ConditionAndRedirectWithEm
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketWebsiteConfigurationDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -353,7 +353,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRules_updateConditionAndRedirect
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketWebsiteConfigurationDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -384,7 +384,7 @@ func TestAccS3BucketWebsiteConfiguration_RoutingRuleToRoutingRules(t *testing.T)
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketWebsiteConfigurationDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -416,7 +416,7 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithIndexDocumentNoChang
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -448,7 +448,7 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithIndexDocumentWithCha
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -480,7 +480,7 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithRoutingRuleNoChange(
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketDestroy(ctx),
 		Steps: []resource.TestStep{
@@ -511,7 +511,7 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithRoutingRuleWithChang
 
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
-		ErrorCheck:               acctest.ErrorCheck(t, s3.EndpointsID),
+		ErrorCheck:               acctest.ErrorCheck(t, names.S3EndpointID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBucketDestroy(ctx),
 		Steps: []resource.TestStep{
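Aside: `tfs3.ParseResourceID`, used by the check functions in the hunks below, splits the resource's composite ID. A hypothetical standalone equivalent, for illustration only (the real helper lives in the provider and may validate further):

```go
package main

import (
	"fmt"
	"strings"
)

// parseResourceID mirrors the behavior assumed of tfs3.ParseResourceID:
// IDs take the form "bucket" or "bucket,expected_bucket_owner".
func parseResourceID(id string) (bucket, expectedBucketOwner string, err error) {
	parts := strings.Split(id, ",")
	switch len(parts) {
	case 1:
		return parts[0], "", nil
	case 2:
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET or BUCKET,EXPECTED_BUCKET_OWNER", id)
	}
}

func main() {
	bucket, owner, _ := parseResourceID("example-bucket,123456789012")
	fmt.Println(bucket, owner) // example-bucket 123456789012
}
```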
"routing_rule.0.redirect.0.protocol", s3.ProtocolHttps), + resource.TestCheckResourceAttr(resourceName, "routing_rule.0.redirect.0.protocol", string(types.ProtocolHttps)), resource.TestCheckResourceAttr(resourceName, "routing_rule.0.redirect.0.replace_key_with", "errorpage.html"), ), }, @@ -539,7 +539,7 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithRoutingRuleWithChang func testAccCheckBucketWebsiteConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_website_configuration" { @@ -551,70 +551,40 @@ func testAccCheckBucketWebsiteConfigurationDestroy(ctx context.Context) resource return err } - input := &s3.GetBucketWebsiteInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } + _, err = tfs3.FindBucketWebsite(ctx, conn, bucket, expectedBucketOwner) - output, err := conn.GetBucketWebsiteWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket, tfs3.ErrCodeNoSuchWebsiteConfiguration) { + if tfresource.NotFound(err) { continue } if err != nil { - return fmt.Errorf("error getting S3 bucket website configuration (%s): %w", rs.Primary.ID, err) + return err } - if output != nil { - return fmt.Errorf("S3 bucket website configuration (%s) still exists", rs.Primary.ID) - } + return fmt.Errorf("S3 Bucket Website Configuration %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckBucketWebsiteConfigurationExists(ctx context.Context, resourceName string) resource.TestCheckFunc { +func testAccCheckBucketWebsiteConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Resource (%s) ID not set", resourceName) + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) bucket, expectedBucketOwner, err := tfs3.ParseResourceID(rs.Primary.ID) if err != nil { return err } - input := &s3.GetBucketWebsiteInput{ - Bucket: aws.String(bucket), - } - - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketWebsiteWithContext(ctx, input) + _, err = tfs3.FindBucketWebsite(ctx, conn, bucket, expectedBucketOwner) - if err != nil { - return fmt.Errorf("error getting S3 bucket website configuration (%s): %w", rs.Primary.ID, err) - } - - if output == nil { - return fmt.Errorf("S3 Bucket website configuration (%s) not found", rs.Primary.ID) - } - - return nil + return err } } diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 9269dcd0507..a497465e9fd 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -16,17 +16,18 @@ const ( errCodeNoSuchBucket = "NoSuchBucket" errCodeNoSuchBucketPolicy = "NoSuchBucketPolicy" errCodeNoSuchConfiguration = "NoSuchConfiguration" - ErrCodeNoSuchCORSConfiguration = "NoSuchCORSConfiguration" + errCodeNoSuchCORSConfiguration = "NoSuchCORSConfiguration" ErrCodeNoSuchLifecycleConfiguration = 
"NoSuchLifecycleConfiguration" errCodeNoSuchKey = "NoSuchKey" ErrCodeNoSuchPublicAccessBlockConfiguration = "NoSuchPublicAccessBlockConfiguration" errCodeNoSuchTagSet = "NoSuchTagSet" errCodeNoSuchTagSetError = "NoSuchTagSetError" - ErrCodeNoSuchWebsiteConfiguration = "NoSuchWebsiteConfiguration" + errCodeNoSuchWebsiteConfiguration = "NoSuchWebsiteConfiguration" errCodeNotImplemented = "NotImplemented" // errCodeObjectLockConfigurationNotFound should be used with tfawserr.ErrCodeContains, not tfawserr.ErrCodeEquals. // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/26317 errCodeObjectLockConfigurationNotFound = "ObjectLockConfigurationNotFound" + errCodeObjectLockConfigurationNotFoundError = "ObjectLockConfigurationNotFoundError" errCodeOperationAborted = "OperationAborted" ErrCodeReplicationConfigurationNotFound = "ReplicationConfigurationNotFoundError" errCodeServerSideEncryptionConfigurationNotFound = "ServerSideEncryptionConfigurationNotFoundError" diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index d4a9f067c99..c4f6d2c74d8 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -5,12 +5,20 @@ package s3 // Exports for use in tests only. var ( - DeleteAllObjectVersions = deleteAllObjectVersions - EmptyBucket = emptyBucket - FindBucket = findBucket - FindBucketAccelerateConfiguration = findBucketAccelerateConfiguration - FindBucketPolicy = findBucketPolicy - FindBucketVersioning = findBucketVersioning - FindObjectByBucketAndKey = findObjectByBucketAndKey - SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + DeleteAllObjectVersions = deleteAllObjectVersions + EmptyBucket = emptyBucket + FindAnalyticsConfiguration = findAnalyticsConfiguration + FindBucket = findBucket + FindBucketACL = findBucketACL + FindBucketAccelerateConfiguration = findBucketAccelerateConfiguration + FindBucketPolicy = findBucketPolicy + FindBucketVersioning = findBucketVersioning + FindBucketWebsite = findBucketWebsite + FindCORSRules = findCORSRules + FindObjectByBucketAndKey = findObjectByBucketAndKey + FindObjectLockConfiguration = findObjectLockConfiguration + FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration + SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + + ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration ) diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index 078bd3af9d4..997d53abcd4 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -73,9 +73,23 @@ func sweepObjects(region string) error { sweepables := make([]sweep.Sweepable, 0) for _, bucket := range buckets { + bucket := aws.ToString(bucket.Name) + objLockConfig, err := findObjectLockConfiguration(ctx, conn, bucket, "") + + var objectLockEnabled bool + + if tfresource.NotFound(err) { + } else if err != nil { + log.Printf("[WARN] Reading S3 Bucket Object Lock Configuration (%s): %s", bucket, err) + continue + } else { + objectLockEnabled = objLockConfig.ObjectLockEnabled == types.ObjectLockEnabledEnabled + } + sweepables = append(sweepables, objectSweeper{ - conn: conn, - name: aws.ToString(bucket.Name), + conn: conn, + bucket: bucket, + locked: objectLockEnabled, }) } @@ -89,15 +103,16 @@ func sweepObjects(region string) error { } type objectSweeper struct { - conn *s3.Client - name string + conn *s3.Client + bucket string + locked bool } func (os objectSweeper) Delete(ctx context.Context, timeout time.Duration, optFns ...tfresource.OptionsFunc) error { // 
diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go
index 078bd3af9d4..997d53abcd4 100644
--- a/internal/service/s3/sweep.go
+++ b/internal/service/s3/sweep.go
@@ -73,9 +73,23 @@ func sweepObjects(region string) error {
 
 	sweepables := make([]sweep.Sweepable, 0)
 
 	for _, bucket := range buckets {
+		bucket := aws.ToString(bucket.Name)
+		objLockConfig, err := findObjectLockConfiguration(ctx, conn, bucket, "")
+
+		var objectLockEnabled bool
+
+		if tfresource.NotFound(err) {
+		} else if err != nil {
+			log.Printf("[WARN] Reading S3 Bucket Object Lock Configuration (%s): %s", bucket, err)
+			continue
+		} else {
+			objectLockEnabled = objLockConfig.ObjectLockEnabled == types.ObjectLockEnabledEnabled
+		}
+
 		sweepables = append(sweepables, objectSweeper{
-			conn: conn,
-			name: aws.ToString(bucket.Name),
+			conn:   conn,
+			bucket: bucket,
+			locked: objectLockEnabled,
 		})
 	}
 
@@ -89,15 +103,16 @@ func sweepObjects(region string) error {
 }
 
 type objectSweeper struct {
-	conn *s3.Client
-	name string
+	conn   *s3.Client
+	bucket string
+	locked bool
 }
 
 func (os objectSweeper) Delete(ctx context.Context, timeout time.Duration, optFns ...tfresource.OptionsFunc) error {
 	// Delete everything including locked objects.
-	_, err := deleteAllObjectVersions(ctx, os.conn, os.name, "", true, true)
+	_, err := deleteAllObjectVersions(ctx, os.conn, os.bucket, "", os.locked, true)
 	if err != nil {
-		return fmt.Errorf("deleting S3 Bucket (%s) objects: %w", os.name, err)
+		return fmt.Errorf("deleting S3 Bucket (%s) objects: %w", os.bucket, err)
 	}
 	return nil
 }
diff --git a/website/docs/r/dms_replication_config.html.markdown b/website/docs/r/dms_replication_config.html.markdown
new file mode 100644
index 00000000000..a4274564dc2
--- /dev/null
+++ b/website/docs/r/dms_replication_config.html.markdown
@@ -0,0 +1,105 @@
+---
+subcategory: "DMS (Database Migration)"
+layout: "aws"
+page_title: "AWS: aws_dms_replication_config"
+description: |-
+  Provides a DMS Serverless replication config resource.
+---
+
+# Resource: aws_dms_replication_config
+
+Provides a DMS Serverless replication config resource.
+
+~> **NOTE:** Changing most arguments will stop the replication if it is running. You can set `start_replication` to resume the replication afterwards.
+
+## Example Usage
+
+```terraform
+resource "aws_dms_replication_config" "name" {
+  replication_config_identifier = "test-dms-serverless-replication-tf"
+  resource_identifier           = "test-dms-serverless-replication-tf"
+  replication_type              = "cdc"
+  source_endpoint_arn           = aws_dms_endpoint.source.endpoint_arn
+  target_endpoint_arn           = aws_dms_endpoint.target.endpoint_arn
+  table_mappings                = <