From b7356a7d5fca49da9ad751ecb2e70bb1425e8149 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:55:05 -0700 Subject: [PATCH 01/80] init setup for replication configuration resource Blocking out general structure for new independent resource for managing the s3 bucket replication configuration settings Pulling over logic from resource s3 bucket to start with --- aws/resource_aws_s3_bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 54af265517f..25dff08edf1 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -789,7 +789,7 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil { + if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(s3conn, d); err != nil { return err } } @@ -1998,7 +1998,7 @@ func resourceAwsS3BucketObjectLockConfigurationUpdate(s3conn *s3.S3, d *schema.R return nil } -func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { +func resourceAwsS3BucketInternalReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) From d8acd7a94e7aed1f59d20ac2baf4779a7089ee10 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:57:33 -0700 Subject: [PATCH 02/80] adding new resource for replication configurations --- aws/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/provider.go b/aws/provider.go index 3f9f86dacc4..5641b36adb4 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -1047,6 +1047,7 @@ func Provider() *schema.Provider { "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), + "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), "aws_s3_object_copy": resourceAwsS3ObjectCopy(), "aws_s3control_bucket": resourceAwsS3ControlBucket(), "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), From 583894f05786fd586a647e481ea334f7d40515fe Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 11:35:47 -0700 Subject: [PATCH 03/80] cleanup and remove unneeded logic --- ...aws_s3_bucket_replication_configuration.go | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go new file mode 100644 index 00000000000..400a2f12c5b --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketReplicationConfigurationCreate, + Read: 
resourceAwsS3BucketReplicationConfigurationRead, + Update: resourceAwsS3BucketReplicationConfigurationUpdate, + Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"bucket_prefix"}, + ValidateFunc: validation.StringLenBetween(0, 63), + }, + "role": { + Type: schema.TypeString, + Required: true, + }, + "rules": { + Type: schema.TypeSet, + Required: true, + Set: rulesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + "destination": { + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsAccountId, + }, + "bucket": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, + "replica_kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + }, + }, + }, + "source_selection_criteria": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tags": tagsSchema(), + }, + }, + }, + "delete_marker_replication_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + }, + }, + }, + }, + + "tags": tagsSchema(), + "tags_all": tagsSchemaComputed(), + }, + + CustomizeDiff: SetTagsDiff, + } +} + +func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { 
+ s3conn := meta.(*AWSClient).s3conn + + input := &s3.HeadBucketInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + _, err := s3conn.HeadBucket(input) + + if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + return resource.RetryableError(err) + } + + if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + // Read the bucket replication configuration + replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Get("bucket").(string)), + }) + }) + if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + return fmt.Errorf("error getting S3 Bucket replication: %s", err) + } + replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) + if !ok || replication == nil { + return fmt.Errorf("error reading replication_configuration") + } + + return nil +} + +func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + + return nil +} From 457754ae5aefa05111cee5ebdb29cd30e2b2d045 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:14:09 -0700 Subject: [PATCH 04/80] WIP setup update processes --- ...aws_s3_bucket_replication_configuration.go | 152 ++++++++++++++++-- 1 file changed, 142 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 400a2f12c5b..74af667e458 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -23,12 +26,11 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"bucket_prefix"}, - ValidateFunc: validation.StringLenBetween(0, 63), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 63), }, "role": { Type: schema.TypeString, @@ -164,10 +166,6 @@ func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, m return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } -func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) -} - func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn @@ -209,6 +207,140 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil } +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + rc := &s3.ReplicationConfiguration{} + if val, ok := d.GetOk("role"); ok { + rc.Role = aws.String(val.(string)) + } + + rcRules := 
d.Get("rules").(*schema.Set).List() + rules := []*s3.ReplicationRule{} + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rcRule := &s3.ReplicationRule{} + if status, ok := rr["status"]; ok && status != "" { + rcRule.Status = aws.String(status.(string)) + } else { + continue + } + + if rrid, ok := rr["id"]; ok && rrid != "" { + rcRule.ID = aws.String(rrid.(string)) + } + + ruleDestination := &s3.Destination{} + if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { + if dest[0] != nil { + bd := dest[0].(map[string]interface{}) + ruleDestination.Bucket = aws.String(bd["bucket"].(string)) + + if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { + ruleDestination.StorageClass = aws.String(storageClass.(string)) + } + + if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), + } + } + + if account, ok := bd["account_id"]; ok && account != "" { + ruleDestination.Account = aws.String(account.(string)) + } + + if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { + aclTranslationValues := aclTranslation[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + } + } + rcRule.Destination = ruleDestination + + if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { + if ssc[0] != nil { + sscValues := ssc[0].(map[string]interface{}) + ruleSsc := &s3.SourceSelectionCriteria{} + if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { + if sseKms[0] != nil { + sseKmsValues := sseKms[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + } + rcRule.SourceSelectionCriteria = ruleSsc + } + } + + if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { + // XML schema V2. + rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) + rcRule.Filter = &s3.ReplicationRuleFilter{} + filter := f[0].(map[string]interface{}) + tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. 
+ rcRule.Prefix = aws.String(rr["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + rc.Rules = rules + i := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, + } + log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := s3conn.PutBucketReplication(i) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketReplication(i) + } + if err != nil { + return fmt.Errorf("Error putting S3 replication configuration: %s", err) + } + + return nil + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { return nil From 22360e8bd646550edd38d6f7b924c135b0a5fccb Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:15:06 -0700 Subject: [PATCH 05/80] WIP pull in tests from s3 bucket resource --- ...3_bucket_replication_configuration_test.go | 1461 +++++++++++++++++ 1 file changed, 1461 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration_test.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go new file mode 100644 index 00000000000..ed348eb3c91 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -0,0 +1,1461 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "regexp" + "testing" +) + +func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig(rInt, 
"GLACIER"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassGlacier), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, 
"replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "prefix3", + "filter.0.tags.%": "1", + "filter.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { + // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + 
resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: 
testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + +// StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 +func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, 
&providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { + rInt := acctest.RandInt() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), + ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), + }, + }, + }) +} + +// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 +func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, 
testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + 
Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + }, + }, + }, + Priority: aws.Int64(42), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String("foo"), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + }, + }, + }, + Priority: aws.Int64(41), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + { + Key: aws.String("Foo"), + Value: aws.String("Bar"), + }, + }, + }, + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { + 
resourceName := "aws_s3_bucket.bucket" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationResourceName := "aws_s3_bucket.destination" + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExists(destinationResourceName), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + +func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { + return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = "tf-iam-role-replication-%[1]d" + + assume_role_policy = < Date: Thu, 19 Aug 2021 11:04:42 -0700 Subject: [PATCH 06/80] WIP ensure create/read/update logic is operational --- ...aws_s3_bucket_replication_configuration.go | 127 +++++++++++++++++- ...3_bucket_replication_configuration_test.go | 33 ++++- 2 files changed, 153 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 74af667e458..4f8cc098c2a 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,8 +2,8 @@ package aws import ( "fmt" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" + "net/http" "time" "github.com/aws/aws-sdk-go/aws" @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3BucketReplicationConfigurationCreate, + Create: 
resourceAwsS3BucketReplicationConfigurationPut, Read: resourceAwsS3BucketReplicationConfigurationRead, Update: resourceAwsS3BucketReplicationConfigurationUpdate, Delete: resourceAwsS3BucketReplicationConfigurationDelete, @@ -162,7 +164,16 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { } } -func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { +func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error { + // Get the bucket + var bucket string + if v, ok := d.GetOk("bucket"); ok { + bucket = v.(string) + } else { + // fail, can't do anything without a bucket + } + d.SetId(bucket) + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } @@ -176,7 +187,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) - if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { return resource.RetryableError(err) } @@ -190,6 +201,29 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil }) + + if tfresource.TimedOut(err) { + _, err = s3conn.HeadBucket(input) + } + + if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) + } + + if _, ok := d.GetOk("bucket"); !ok { + d.Set("bucket", d.Id()) + } + // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ @@ -203,6 +237,90 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if !ok || replication == nil { return fmt.Errorf("error reading replication_configuration") } + r := replication.ReplicationConfiguration + // set role + if r.Role != nil && aws.StringValue(r.Role) != "" { + d.Set("role", aws.StringValue(r.Role)) + } + + // set rules, these need to be flattened + rules := make([]interface{}, 0, len(r.Rules)) + for _, v := range r.Rules { + t := make(map[string]interface{}) + if v.Destination != nil { + rd := make(map[string]interface{}) + if v.Destination.Bucket != nil { + rd["bucket"] = aws.StringValue(v.Destination.Bucket) + } + if v.Destination.StorageClass != nil { + rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) + } + if v.Destination.EncryptionConfiguration != nil { + if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { + rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) + } + } + if v.Destination.Account != nil { + rd["account_id"] = aws.StringValue(v.Destination.Account) + } + if v.Destination.AccessControlTranslation != nil { + rdt := map[string]interface{}{ + "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), + } + rd["access_control_translation"] = []interface{}{rdt} + } + t["destination"] = []interface{}{rd} + } + + if v.ID != nil { + t["id"] = aws.StringValue(v.ID) + } + if v.Prefix != 
nil { + t["prefix"] = aws.StringValue(v.Prefix) + } + if v.Status != nil { + t["status"] = aws.StringValue(v.Status) + } + if vssc := v.SourceSelectionCriteria; vssc != nil { + tssc := make(map[string]interface{}) + if vssc.SseKmsEncryptedObjects != nil { + tSseKms := make(map[string]interface{}) + if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { + tSseKms["enabled"] = true + } else if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { + tSseKms["enabled"] = false + } + tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} + } + t["source_selection_criteria"] = []interface{}{tssc} + } + + if v.Priority != nil { + t["priority"] = int(aws.Int64Value(v.Priority)) + } + + if f := v.Filter; f != nil { + m := map[string]interface{}{} + if f.Prefix != nil { + m["prefix"] = aws.StringValue(f.Prefix) + } + if t := f.Tag; t != nil { + m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + } + if a := f.And; a != nil { + m["prefix"] = aws.StringValue(a.Prefix) + m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + } + t["filter"] = []interface{}{m} + + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { + t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + } + } + + rules = append(rules, t) + } + d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } @@ -337,7 +455,6 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m return fmt.Errorf("Error putting S3 replication configuration: %s", err) } - return nil return resourceAwsS3BucketReplicationConfigurationRead(d, meta) } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index ed348eb3c91..c12697906f4 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,15 +2,44 @@ package aws import ( "fmt" + "regexp" + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "regexp" - "testing" ) +func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { + rInt := acctest.RandInt() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + ), + }, + }, + }) +} + func 
TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() From 2d0f9cea737d7815c09c9756b188bee1502ba1ff Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 19 Aug 2021 14:05:53 -0700 Subject: [PATCH 07/80] basic tests passing --- ...aws_s3_bucket_replication_configuration.go | 5 - ...3_bucket_replication_configuration_test.go | 92 +++++-------------- 2 files changed, 23 insertions(+), 74 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4f8cc098c2a..5742d810d4b 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -155,12 +155,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, - - "tags": tagsSchema(), - "tags_all": tagsSchemaComputed(), }, - - CustomizeDiff: SetTagsDiff, } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c12697906f4..f872ab698f1 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -12,34 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { - rInt := acctest.RandInt() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - ), - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() @@ -902,6 +874,10 @@ resource "aws_s3_bucket" "destination" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "source" { @@ -910,6 +886,10 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } `, randInt) } @@ -945,6 +925,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -954,6 +937,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1014,6 +1000,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -1023,6 +1012,9 @@ resource "aws_s3_bucket" 
"destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1094,6 +1086,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1316,47 +1311,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { `) } -func testAccAWSS3BucketReplicationConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination string) string { - return composeConfig(testAccAWSS3BucketReplicationConfig_iamPolicy(rName), fmt.Sprintf(` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = %[1]q - acl = "private" - - versioning { - enabled = true - } - - replication_configuration { - role = aws_iam_role.test.arn - - rules { - id = "testid" - status = "Enabled" - - filter { - prefix = "testprefix" - } - - delete_marker_replication_status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } - } -} - -resource "aws_s3_bucket_replication_configuration" "destination" { - bucket = %[2]q - - versioning { - enabled = true - } -} -`, rName, rNameDestination)) -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { From c08a294393ccde4d025dd1721cbf3d4037547a7b Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 15:27:31 -0700 Subject: [PATCH 08/80] Update expected resource names Rename resource names to reflect new position in configuration scope of the independent resource. 
Use literal strings instead of fmt.Sprint in hcl concatination --- ...3_bucket_replication_configuration_test.go | 169 +++++++----------- 1 file changed, 66 insertions(+), 103 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index f872ab698f1..4ac4e078867 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -109,7 +108,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -130,9 +129,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -141,7 +139,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -150,7 +148,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -176,7 +174,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -197,9 +195,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", 
testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -208,7 +205,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -218,7 +215,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -247,7 +244,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -267,9 +264,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -278,7 +274,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -306,7 +302,7 @@ func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -324,9 +320,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -358,9 +353,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -399,7 +393,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -417,9 +411,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -448,9 +441,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo 
Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -480,7 +472,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -512,35 +504,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { - rInt := acctest.RandInt() - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), - ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), - }, - }, - }) -} - // Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -578,7 +547,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -596,9 +565,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, 
"replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -626,9 +594,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -663,9 +630,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -701,9 +667,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -743,9 +708,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -790,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" rName := acctest.RandomWithPrefix("tf-acc-test") destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") @@ -805,9 +769,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -943,7 +906,7 @@ resource "aws_s3_bucket" "destination3" { } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn rules { @@ -984,7 +947,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } - } + } `, randInt)) } @@ -1131,7 +1094,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_kms_key" "replica" { provider = "awsalternate" description = "TF Acceptance Test S3 repl KMS key" @@ -1160,11 +1123,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1187,11 +1150,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + 
fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1210,11 +1173,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_kms_key" "replica" { @@ -1250,11 +1213,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1269,11 +1232,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1288,11 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1308,11 +1271,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1331,11 +1294,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1356,11 +1319,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1383,11 +1346,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func 
testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1413,11 +1376,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1440,5 +1403,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } From 61573bdd180ef682eac03fef31c9fcc49e90193d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 16:03:59 -0700 Subject: [PATCH 09/80] Guard against missing bucket or import id Ensure that the source bucket name is configured in the HCL Ensure that when importing the bucket name is passed in to the process as the import id value --- ...aws_s3_bucket_replication_configuration.go | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 5742d810d4b..1eb8c033488 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "net/http" @@ -165,7 +166,8 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta if v, ok := d.GetOk("bucket"); ok { bucket = v.(string) } else { - // fail, can't do anything without a bucket + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") } d.SetId(bucket) @@ -173,12 +175,24 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta } func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - input := &s3.HeadBucketInput{ - Bucket: aws.String(d.Get("bucket").(string)), + if _, ok := d.GetOk("bucket"); !ok { + // during import operations, use the supplied ID for the bucket name + d.Set("bucket", d.Id()) + } + + var bucket *string + input := &s3.HeadBucketInput{} + if rsp, ok := d.GetOk("bucket"); !ok { + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") + } else { + bucket = aws.String(rsp.(string)) + input.Bucket = bucket } + s3conn := meta.(*AWSClient).s3conn + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) @@ -215,14 +229,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) } - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } - // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: bucket, }) }) if err != nil && !isAWSErr(err, 
"ReplicationConfigurationNotFoundError", "") { From eef6a43311e4d53a4f459a3f34c5e51a39b9bfa8 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 24 Aug 2021 16:11:06 -0700 Subject: [PATCH 10/80] Cleanout and relocate testing logic Relocate replication testing helper functions out of the s3 bucket tests and into the replication configuration testing file. Remove s3 bucket existance checks from replication testing per does not apply to the replication resource logic. --- ...3_bucket_replication_configuration_test.go | 290 ++++++++++-------- aws/resource_aws_s3_bucket_test.go | 64 ---- 2 files changed, 155 insertions(+), 199 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 4ac4e078867..e9bf0f3c449 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "reflect" + "sort" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -9,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -106,8 +110,6 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -125,10 +127,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -172,8 +170,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -191,10 +187,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, 
testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -242,8 +234,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -261,9 +251,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -299,7 +286,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -319,7 +305,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -352,7 +337,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -390,7 +374,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -410,7 +393,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -440,7 +422,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -470,8 +451,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -488,10 +467,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), + Check: resource.ComposeTestCheckFunc(), }, { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -504,47 +480,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 -func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { - rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: 
testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -564,10 +501,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -593,10 +528,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -629,10 +562,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -666,10 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), 
testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -707,10 +636,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -755,8 +682,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") - destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -766,12 +693,10 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExists(resourceName), testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -795,7 +720,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { ), }, { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, @@ -806,8 +731,71 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + for _, rule := range rules { + if dest := rule.Destination; dest != nil { + if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { + resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) + resourceReference = strings.Replace(resourceReference, "}", "", 1) + resourceReferenceParts := strings.Split(resourceReference, ".") + resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] + resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") + value := s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] + dest.Account = aws.String(value) + } + if ec := dest.EncryptionConfiguration; ec != nil { + if ec.ReplicaKmsKeyID != nil { + key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] + ec.ReplicaKmsKeyID = 
aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) + } + } + } + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + + conn := testAccProvider.Meta().(*AWSClient).s3conn + out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), + }) + if err != nil { + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } + if rules == nil { + return nil + } + return fmt.Errorf("GetReplicationConfiguration error: %v", err) + } + + for _, rule := range out.ReplicationConfiguration.Rules { + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { + return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + } + + return nil + } +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { - return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` + return fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_iam_role" "role" { @@ -853,8 +841,7 @@ resource "aws_s3_bucket" "source" { lifecycle { ignore_changes = [replication_configuration] } -} -`, randInt) +} `, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { @@ -873,8 +860,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "%[1]s" } } -} -`, storageClass) +} `, storageClass) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { @@ -948,8 +934,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1034,8 +1019,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1089,8 +1073,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1122,8 +1105,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1149,8 +1131,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1172,8 +1153,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1212,8 +1192,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { @@ -1231,8 +1210,7 @@ 
resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.destination.arn } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { @@ -1250,28 +1228,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` -} - -func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { @@ -1293,8 +1250,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { @@ -1318,8 +1274,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { @@ -1345,8 +1300,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { @@ -1375,8 +1329,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { @@ -1402,6 +1355,73 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } +} ` } -` + +func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Mon, 30 Aug 2021 10:19:46 -0700 Subject: [PATCH 11/80] Support Existing Object Replication Adding schema for ExistingObjectReplication configuration Adding read logic to identify ExistingObjectReplication configurations added to replication rules Adding update logic to include ExistingObjectReplicaiton configuration in the PutBucketReplicaiton input --- ...aws_s3_bucket_replication_configuration.go | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1eb8c033488..09a75e12c6f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -148,6 +148,21 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "existing_object_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + }, + }, + }, + }, "delete_marker_replication_status": { Type: schema.TypeString, Optional: true, @@ -248,7 +263,6 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met d.Set("role", 
aws.StringValue(r.Role)) } - // set rules, these need to be flattened rules := make([]interface{}, 0, len(r.Rules)) for _, v := range r.Rules { t := make(map[string]interface{}) @@ -277,6 +291,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met t["destination"] = []interface{}{rd} } + if v.ExistingObjectReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) + t["existing_object_replication"] = status + } + if v.ID != nil { t["id"] = aws.StringValue(v.ID) } @@ -354,6 +374,14 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.ID = aws.String(rrid.(string)) } + eor := rr["existing_object_replication"].([]interface{}) + if len(eor) > 0 { + s := eor[0].(map[string]interface{}) + rcRule.ExistingObjectReplication = &s3.ExistingObjectReplication{ + Status: aws.String(s["status"].(string)), + } + } + ruleDestination := &s3.Destination{} if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { if dest[0] != nil { From 796c1cba6315c317108a9874d5dfacd45afc9808 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 30 Aug 2021 10:23:04 -0700 Subject: [PATCH 12/80] Testing for ExistingObjectReplication In order for ExistingObjectReplication to work on s3 buckets, a request to AWS Technical Support needs to be made. Once they allow the configuration the test will operate as expected. --- ...3_bucket_replication_configuration_test.go | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e9bf0f3c449..31bf253c698 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -731,6 +731,68 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +const isExistingObjectReplicationBlocked = true + +func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { + if isExistingObjectReplicationBlocked { + /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. + Once that request is approved, this can be unblocked for testing. 
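   Until then, the configuration exercised below shows how the block is expected to be used once the
   account is allow-listed (see testAccAWSS3BucketReplicationConfig_existingObjectReplication):

       existing_object_replication {
         status = "Enabled"
       }
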
*/ + return + } + resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + ExistingObjectReplication: &s3.ExistingObjectReplication{ + Status: aws.String(s3.ExistingObjectReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] @@ -1425,3 +1487,76 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } `, rName, rNameDestination, rInt) } + +func testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Wed, 1 Sep 2021 08:39:13 -0700 Subject: [PATCH 13/80] Adding support for Replication Time Control new schema definition for "replication_time" along with update and read logic. 
tracking upstream changes, adopt "waiter" module --- ...aws_s3_bucket_replication_configuration.go | 62 +++++++++++++++- ...3_bucket_replication_configuration_test.go | 72 +++++++++++++++++++ 2 files changed, 131 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 09a75e12c6f..4fd9a8ce7c7 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -91,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "replication_time": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + }, + "time": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -157,7 +188,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), }, }, @@ -208,7 +239,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met s3conn := meta.(*AWSClient).s3conn - err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -288,10 +319,20 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } rd["access_control_translation"] = []interface{}{rdt} } + if v.Destination.ReplicationTime != nil { + if v.Destination.ReplicationTime.Status != nil { + rd["replication_time"] = map[string]interface{}{ + "status": v.Destination.ReplicationTime.Status, + "time": map[string]interface{}{ + "minutes": v.Destination.ReplicationTime.Time.Minutes, + }, + } + } + } t["destination"] = []interface{}{rd} } - if v.ExistingObjectReplication.Status != nil { + if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) t["existing_object_replication"] = status @@ -408,6 +449,21 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) ruleDestination.AccessControlTranslation = ruleAclTranslation } + + rt, ok := bd["replication_time"].([]interface{}) + if ok && len(rt) > 0 { + s := rt[0].(map[string]interface{}) + if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { + m := 
t[0].(map[string]interface{}) + ruleDestination.ReplicationTime = &s3.ReplicationTime{ + Status: aws.String(s["status"].(string)), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 31bf253c698..7d44e94c166 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -448,6 +448,54 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }) } +func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + ReplicationTime: &s3.ReplicationTime{ + Status: aws.String(s3.ReplicationTimeStatusEnabled), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -925,6 +973,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } `, storageClass) } +func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } + } + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), From cd5556b8ea9e0e5cd3b7ea0ad7bfca6b389aef86 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 14:29:24 -0700 Subject: [PATCH 14/80] Adding Metrics support Metrics are a requirement for the Replication Time Control functionality. Adding it here. 
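A rough sketch of how the two settings are intended to pair inside a rule's `destination` block, assuming placeholder source/destination buckets and an IAM role defined elsewhere:

```terraform
# Illustrative rule pairing replication_time with metrics; the referenced
# buckets and IAM role are placeholders defined elsewhere.
resource "aws_s3_bucket_replication_configuration" "example" {
  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.replication.arn

  rules {
    id     = "rtc-example"
    status = "Enabled"

    filter {
      prefix = "example"
    }

    destination {
      bucket = aws_s3_bucket.destination.arn

      # Replication Time Control: replicate within the configured window.
      replication_time {
        status = "Enabled"
        time {
          minutes = 15
        }
      }

      # Metrics report replication progress and missed-threshold events.
      metrics {
        status = "Enabled"
        event_threshold {
          minutes = 15
        }
      }
    }
  }
}
```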
Restructure the configuration read logic for Replication Time to be more correct and inline with expected data structures Update tests to reflect changes --- ...aws_s3_bucket_replication_configuration.go | 66 +++++++++++++++++-- ...3_bucket_replication_configuration_test.go | 27 ++++++-- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4fd9a8ce7c7..1cf97c4b1d8 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -92,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "metrics": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + }, + "event_threshold": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, "replication_time": { Type: schema.TypeList, Optional: true, @@ -320,13 +350,23 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met rd["access_control_translation"] = []interface{}{rdt} } if v.Destination.ReplicationTime != nil { + drt := make(map[string]interface{}) if v.Destination.ReplicationTime.Status != nil { - rd["replication_time"] = map[string]interface{}{ - "status": v.Destination.ReplicationTime.Status, - "time": map[string]interface{}{ - "minutes": v.Destination.ReplicationTime.Time.Minutes, - }, - } + drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status) + drtm := make(map[string]interface{}) + drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes) + drt["time"] = []interface{}{drtm} + rd["replication_time"] = []interface{}{drt} + } + } + if v.Destination.Metrics != nil { + dm := make(map[string]interface{}) + if v.Destination.Metrics.Status != nil { + dm["status"] = aws.StringValue(v.Destination.Metrics.Status) + dmetm := make(map[string]interface{}) + dmetm["minutes"] = aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes) + dm["event_threshold"] = []interface{}{dmetm} + rd["metrics"] = []interface{}{dm} } } t["destination"] = []interface{}{rd} @@ -464,6 +504,20 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } } + rm, ok := bd["metrics"].([]interface{}) + if ok && len(rm) > 0 { + s := rm[0].(map[string]interface{}) + if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 { + m := et[0].(map[string]interface{}) + ruleDestination.Metrics = &s3.Metrics{ + Status: aws.String(s["status"].(string)), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 7d44e94c166..70ff1e4cea9 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -475,7 +475,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { 
resourceName, []*s3.ReplicationRule{ { - ID: aws.String("foobar"), + ID: aws.String("foobar"), + Priority: aws.Int64(0), Destination: &s3.Destination{ Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), ReplicationTime: &s3.ReplicationTime{ @@ -484,8 +485,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { Minutes: aws.Int64(15), }, }, + Metrics: &s3.Metrics{ + Status: aws.String(s3.MetricsStatusEnabled), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), }, - Prefix: aws.String("foo"), Status: aws.String(s3.ReplicationRuleStatusEnabled), }, }, @@ -981,9 +993,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { rules { id = "foobar" - prefix = "foo" + filter { + prefix = "foo" + } status = "Enabled" - destination { bucket = aws_s3_bucket.destination.arn replication_time { @@ -992,6 +1005,12 @@ resource "aws_s3_bucket_replication_configuration" "replication" { minutes = 15 } } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } + } } } }` From b398f35f9fd9c852bc49e1ae64e35a679ceb9133 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 16:14:24 -0700 Subject: [PATCH 15/80] Adding Replica Modifications support, with tests Update the the source_selection_criteria configuration to include the replica_modificaions. Refactored sse_kms_encrypted_objects schema to map closer to the actual AWS SDK structure. --- ...aws_s3_bucket_replication_configuration.go | 44 ++++++--- ...3_bucket_replication_configuration_test.go | 99 +++++++++++++++---- 2 files changed, 109 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1cf97c4b1d8..8cfffba4044 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -159,7 +159,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "sse_kms_encrypted_objects": { @@ -169,9 +169,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + }, + }, + }, + }, + "replica_modifications": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), }, }, }, @@ -391,11 +407,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met tssc := make(map[string]interface{}) if vssc.SseKmsEncryptedObjects != nil { tSseKms := make(map[string]interface{}) - if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { - tSseKms["enabled"] = true - } else if 
aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { - tSseKms["enabled"] = false - } + tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} } t["source_selection_criteria"] = []interface{}{tssc} @@ -530,14 +542,18 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m if sseKms[0] != nil { sseKmsValues := sseKms[0].(map[string]interface{}) sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - if sseKmsValues["enabled"].(bool) { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) - } else { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) - } + sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects } } + if sscRm, ok := sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { + if sscRm[0] != nil { + replicaModValues := sscRm[0].(map[string]interface{}) + replicaModifications := &s3.ReplicaModifications{} + replicaModifications.Status = aws.String(replicaModValues["status"].(string)) + ruleSsc.ReplicaModifications = replicaModifications + } + } rcRule.SourceSelectionCriteria = ruleSsc } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 70ff1e4cea9..acb0374ab3f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -508,6 +508,59 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }) } +func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Priority: aws.Int64(0), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + ReplicaModifications: &s3.ReplicaModifications{ + Status: aws.String(s3.ReplicaModificationsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: 
https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -1016,6 +1069,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } +func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" + } + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), @@ -1254,7 +1331,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1341,7 +1418,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1366,24 +1443,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } ` } -func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} ` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { From 48bdc44c3c21942e8dc740ed40fa86326de45f37 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 14:42:41 -0700 Subject: [PATCH 16/80] terrafmt --- ...3_bucket_replication_configuration_test.go | 570 +++++++++--------- 1 file changed, 285 insertions(+), 285 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index acb0374ab3f..522c3389979 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1002,94 +1002,94 @@ resource "aws_s3_bucket" "destination" { } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket" "source" { - bucket = "tf-test-bucket-source-%[1]d" + bucket = "tf-test-bucket-source-%[1]d" versioning { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } -} `, randInt) +}`, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + 
prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "%[1]s" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "%[1]s" } -} `, storageClass) + } +}`, storageClass) } func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" + rules { + id = "foobar" + filter { + prefix = "foo" + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - replication_time { - status = "Enabled" - time { - minutes = 15 - } - } - metrics { - status = "Enabled" - event_threshold { - minutes = 15 - } - } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } } } + } }` } func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" - } - source_selection_criteria { - replica_modifications { - status = "Enabled" - } - } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" } } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } }` } @@ -1105,7 +1105,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1117,54 +1117,54 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } - -} `, randInt)) + } + +}`, 
randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1179,7 +1179,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1191,65 +1191,65 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter { - prefix = "prefix3" + filter { + prefix = "prefix3" - tags = { - Key3 = "Value3" - } + tags = { + Key3 = "Value3" } + } - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1264,46 +1264,46 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1316,26 +1316,26 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" 
+ rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1344,24 +1344,24 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1370,20 +1370,20 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1398,176 +1398,176 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role 
= aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - } + destination { + bucket = aws_s3_bucket.destination.arn } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - delete_marker_replication_status = "Enabled" + delete_marker_replication_status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 42 + priority = 42 - filter { - tags = { - ReplicateMe = "Yes" - } + filter { + tags = { + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 41 + priority = 41 - filter { - prefix = "foo" + filter { + prefix = "foo" - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } + tags = { + AnotherTag = "OK" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = 
aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } + filter { + tags = { + AnotherTag = "OK" + Foo = "Bar" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { From 22ee15a150fcb656a798232063d13e268e414f4d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 15:46:23 -0700 Subject: [PATCH 17/80] terrafmt --- ...3_bucket_replication_configuration_test.go | 89 ++++++++++--------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 522c3389979..012e5f5bb75 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1573,7 +1573,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { - name = %[1]q + name = "%[1]s" assume_role_policy = < Date: Fri, 3 Sep 2021 15:50:53 -0700 Subject: [PATCH 18/80] Initial documentation for new resource Adding documentation page for the new independent resource. Initialized with content copied over from the s3_bucket.html.markdown page. --- ...et_replication_configuration.html.markdown | 190 ++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 website/docs/r/s3_bucket_replication_configuration.html.markdown diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown new file mode 100644 index 00000000000..9333835f4d5 --- /dev/null +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -0,0 +1,190 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_bucket_replication_configuration" +description: |- + Provides a S3 bucket replication configuration resource. +--- + +# Resource: aws_s3_bucket_replication_configuration + +Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. + +## Example Usage + +### Using replication configuration + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = < **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. +Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. 
To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `destination` - (Required) Specifies the destination for the rule (documented below). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. +* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + +The `destination` object supports the following: + +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + `sse_kms_encrypted_objects` source selection criteria. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. + +The `source_selection_criteria` object supports the following: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. + +The `sse_kms_encrypted_objects` object supports the following: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + +The `filter` object supports the following: + +* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. +The rule applies only to objects having all the tags in its tagset. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +## Import + +S3 bucket replication configuration can be imported using the `bucket`, e.g. 
+ +``` +$ terraform import aws_s3_bucket_replication_configuration.replication bucket-name +``` From 644dc23a8ca3080a7807c6419f3d4e87636db346 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 9 Sep 2021 14:26:09 -0700 Subject: [PATCH 19/80] adding new feature documentation --- website/docs/r/s3_bucket.html.markdown | 14 +++++++ ...et_replication_configuration.html.markdown | 40 ++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 88ce04b1eea..05f1a50e7ee 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -289,6 +289,10 @@ resource "aws_s3_bucket" "source" { } ``` +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. + + + ### Enable Default Server Side Encryption ```terraform @@ -424,6 +428,16 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: +~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9333835f4d5..9870635e069 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -107,6 +107,11 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { + ignore_changes = [ + replication_configuration + ] + } } aws_s3_bucket_replication_configuration replication { @@ -126,6 +131,17 @@ aws_s3_bucket_replication_configuration replication { ``` +~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + + ## Argument Reference The following arguments are supported: @@ -142,6 +158,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). * `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). 
* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). @@ -153,6 +170,10 @@ Replication configuration V1 supports filtering based on only the `prefix` attri ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +The `existing_object_replication` object supports the following: + +* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -161,15 +182,32 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). +* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). + +The `replication_time` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + +The `metrics` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. The `source_selection_criteria` object supports the following: +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between + replicas and source objects (documented below). + * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` in `destination` must be specified as well. +The `replica_modifications` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `sse_kms_encrypted_objects` object supports the following: -* `enabled` - (Required) Boolean which indicates if this criteria is enabled. +* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
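For illustration, a rule that limits replication to SSE-KMS encrypted objects and re-encrypts the replicas might be sketched as follows; the bucket, role, and KMS key references are placeholders:

```terraform
# Illustrative only: replicate SSE-KMS encrypted objects and re-encrypt the
# replicas with a destination KMS key. Bucket, role, and key names are placeholders.
resource "aws_s3_bucket_replication_configuration" "replication" {
  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.replication.arn

  rules {
    id     = "kms-objects"
    prefix = "docs/"
    status = "Enabled"

    source_selection_criteria {
      sse_kms_encrypted_objects {
        status = "Enabled"
      }
    }

    destination {
      bucket             = aws_s3_bucket.destination.arn
      storage_class      = "STANDARD"
      replica_kms_key_id = aws_kms_key.replica.arn
    }
  }
}
```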
The `filter` object supports the following: From bd4302cb0790c9aeafdfe5829ebbfa3646869ef8 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:09 -0700 Subject: [PATCH 20/80] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 4 ++-- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 012e5f5bb75..e550fcad46d 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,11 +844,11 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = true +const isExistingObjectReplicationBlocked = false func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { - /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. Once that request is approved, this can be unblocked for testing. */ return diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9870635e069..2511d95454b 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -172,6 +172,8 @@ Replication configuration V1 supports filtering based on only the `prefix` attri The `existing_object_replication` object supports the following: +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) + * `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
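A rule that opts in to replicating existing objects might be sketched as follows, assuming the account has already been enabled for existing object replication by AWS Support and that the referenced buckets and role exist:

```terraform
# Illustrative only: opt a rule in to replicating pre-existing objects.
# Bucket and role references are placeholders defined elsewhere.
resource "aws_s3_bucket_replication_configuration" "replication" {
  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.replication.arn

  rules {
    id     = "existing-objects"
    status = "Enabled"

    filter {
      prefix = "testprefix"
    }

    existing_object_replication {
      status = "Enabled"
    }

    destination {
      bucket        = aws_s3_bucket.destination.arn
      storage_class = "STANDARD"
    }
  }
}
```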
The `destination` object supports the following: From 17a88f464b9f5326de0a6cb8cf6a3bb616fdeceb Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:56 -0700 Subject: [PATCH 21/80] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e550fcad46d..fed215ad3c1 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,7 +844,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = false +const isExistingObjectReplicationBlocked = true func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { From 8bf7bc6a2785e0151c94354904a8ed2bd7f48209 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 13 Sep 2021 15:01:45 -0700 Subject: [PATCH 22/80] adding headers and source examples to documentation --- ...et_replication_configuration.html.markdown | 162 +++++++++++++++--- 1 file changed, 139 insertions(+), 23 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 2511d95454b..091286c63ab 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -131,6 +131,76 @@ aws_s3_bucket_replication_configuration replication { ``` +### Bi-Directional Replication + +``` + +... + +resource "aws_s3_bucket" "east" { + bucket = "tf-test-bucket-east-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +resource "aws_s3_bucket" "west" { + provider = west + bucket = "tf-test-bucket-west-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +aws_s3_bucket_replication_configuration "east_to_west" { + role = aws_iam_role.east_replication.arn + bucket = aws_s3_bucket.east.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.west.arn + storage_class = "STANDARD" + } + } +} + +aws_s3_bucket_replication_configuration "west_to_east" { + role = aws_iam_role.west_replication.arn + bucket = aws_s3_bucket.west.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.east.arn + storage_class = "STANDARD" + } + } +} +``` + +## Usage Notes + +This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. + ~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. 
``` @@ -140,11 +210,17 @@ lifecycle { ] } ``` +The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +* `replica_modifications` - Added to the `source_selection_criteria` configuration +* `metrics` - Added to the `destination` configuration +* `replication_time` - Added to the `destination` configuration +* `existing_object_replication` - Added to the replication rule + +Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference -The following arguments are supported: +## Argument Reference The `replication_configuration` object supports the following: @@ -152,30 +228,42 @@ The `replication_configuration` object supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). -The `rules` object supports the following: +### Rules + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. + +The `rules` object supports the following: + With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). -* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). -* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. 
+* `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +### Rule Existing Object Replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +``` +existing_object_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -187,29 +275,61 @@ The `destination` object supports the following: * `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). * `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +### Replication Time Control + +``` +replication_time { + status = "Enabled" + time { + minutes = 15 + } +} +``` + The `replication_time` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. + +### Metrics + +``` +metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } +} +``` The `metrics` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. + +### Source Selection Criteria The `source_selection_criteria` object supports the following: +``` +source_selection_criteria { + replica_modification { + status = "Enabled" + } + sse_kms_encrypted_objects { + status = "Enabled" + } +} +``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects (documented below). - -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). 
If specified, `replica_kms_key_id` - in `destination` must be specified as well. - -The `replica_modifications` object supports the following: + replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -The `sse_kms_encrypted_objects` object supports the following: + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Replication Rule Filter The `filter` object supports the following: @@ -217,10 +337,6 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 6bae9ee8d5f855d09ed4a5295da91205801cd786 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 14 Sep 2021 10:30:09 -0700 Subject: [PATCH 23/80] adding internal documentation links, cleanup --- ...et_replication_configuration.html.markdown | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 091286c63ab..099b3261ab7 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -8,7 +8,7 @@ description: |- # Resource: aws_s3_bucket_replication_configuration -Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. +Provides an independent configuration resource for S3 bucket [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). ## Example Usage @@ -199,9 +199,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
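A minimal sketch of where that `lifecycle` block belongs (the bucket name and `versioning` settings here are illustrative placeholders, mirroring the bi-directional example above) is:

```
resource "aws_s3_bucket" "source" {
  bucket = "tf-test-bucket-source-12345"

  # Versioning must be enabled on both source and destination buckets for replication.
  versioning {
    enabled = true
  }

  # Ignore the bucket's internal replication_configuration block so it does not
  # conflict with the aws_s3_bucket_replication_configuration resource.
  lifecycle {
    ignore_changes = [
      replication_configuration
    ]
  }
}
```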
-~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. +~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. ``` lifecycle { @@ -210,25 +210,25 @@ lifecycle { ] } ``` -The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: -* `replica_modifications` - Added to the `source_selection_criteria` configuration -* `metrics` - Added to the `destination` configuration -* `replication_time` - Added to the `destination` configuration -* `existing_object_replication` - Added to the replication rule +* `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) +* `metrics` - Added to the `destination` configuration object [documented below](#metrics) +* `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) +* `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) ## Argument Reference -The `replication_configuration` object supports the following: +The `replication_configuration` resource supports the following: -* `bucket` - (Required) The ARN of the source S3 bucket where you want Amazon S3 to monitor. +* `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication (documented below). +* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### Rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -236,20 +236,19 @@ The `replication_configuration` object supports the following: The `rules` object supports the following: -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. -Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). 
* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). -* `destination` - (Required) Specifies the destination for the rule (documented below). -* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `destination` - (Required) Specifies the destination for the rule [documented below](#destination). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. -* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### Rule Existing Object Replication +### exiting_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) @@ -263,7 +262,7 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -### Destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -272,10 +271,10 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). -* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). +* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). -### Replication Time Control +### replication_time ``` replication_time { @@ -291,7 +290,7 @@ The `replication_time` object supports the following: * `status` - (Required) The status of the Replication Time Control. 
Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. -### Metrics +### metrics ``` metrics { @@ -307,7 +306,7 @@ The `metrics` object supports the following: * `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. -### Source Selection Criteria +### source_selection_criteria The `source_selection_criteria` object supports the following: ``` @@ -322,14 +321,14 @@ source_selection_criteria { ``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. 
-### Replication Rule Filter +### filter The `filter` object supports the following: From 8d0562db0ea5c1f4ceda4d4b53ef664cbcd5a3e0 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 15 Sep 2021 16:34:21 -0700 Subject: [PATCH 24/80] Align delete_marker_replication with other objects --- aws/resource_aws_s3_bucket.go | 2 +- ...aws_s3_bucket_replication_configuration.go | 50 +++++++++++-------- ...3_bucket_replication_configuration_test.go | 12 +++-- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 097ccbfb045..df3e5b46000 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -2585,7 +2585,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 8cfffba4044..581ac90653f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -96,13 +96,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, "event_threshold": { Type: schema.TypeList, @@ -126,13 +126,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, "time": { Type: schema.TypeList, @@ -172,7 +172,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), }, }, }, @@ -187,7 +187,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), }, }, }, @@ -235,15 +235,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), }, }, }, }, - "delete_marker_replication_status": { - Type: schema.TypeString, - 
Optional: true, - ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, }, }, @@ -391,7 +401,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = status + t["existing_object_replication"] = []interface{}{status} } if v.ID != nil { @@ -431,8 +441,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } t["filter"] = []interface{}{m} - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { - t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + t["delete_marker_replication"] = []interface{}{status} } } @@ -573,13 +585,11 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) } - if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(dmr), - } - } else { + dmr, ok := rr["delete_marker_replication"].([]interface{}) + if ok && len(dmr) > 0 { + s := dmr[0].(map[string]interface{}) rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s["status"].(string)), } } } else { diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index fed215ad3c1..c5c6879cf80 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1479,7 +1479,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1628,7 +1630,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "testprefix" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1701,7 +1705,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn From eb4cc6bd008ec11faa466c7aa29265b4e8595a0e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 08:31:08 -0700 Subject: [PATCH 25/80] Update delete_marker replication docs to reflect changes --- ...ket_replication_configuration.html.markdown | 18 +++++++++++++++++- 1 file 
changed, 17 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 099b3261ab7..44d46f1c4d0 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -239,7 +239,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). -* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication). * `destination` - (Required) Specifies the destination for the rule [documented below](#destination). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. @@ -262,6 +262,22 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +### delete_marker_replication + +~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. + +~> **NOTE:** This argument is only available with V2 replication configurations. + +The `delete_marker_replication` object supports the following: + +``` +delete_marker_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + + ### destination The `destination` object supports the following: From 27e63481ef9d9a6e1b3f7b8f84f6ca7749b86ba1 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 13:59:37 -0700 Subject: [PATCH 26/80] Documentation adjustments fix typos shift notes to be above examples remove unnecssary words expand on some attribute concepts that maybe obscure --- website/docs/r/s3_bucket.html.markdown | 6 +-- ...et_replication_configuration.html.markdown | 51 ++++++++++--------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 05f1a50e7ee..6351cbeeda8 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -178,6 +178,8 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using replication configuration +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. 
+ ```terraform provider "aws" { region = "eu-west-1" @@ -289,10 +291,6 @@ resource "aws_s3_bucket" "source" { } ``` -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. - - - ### Enable Default Server Side Encryption ```terraform diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 44d46f1c4d0..de512bcb34d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,9 +133,9 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -``` +```terraform -... +#... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -199,17 +199,18 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. - ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -``` +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. + +```terraform lifecycle { ignore_changes = [ replication_configuration ] } ``` + The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) @@ -248,51 +249,53 @@ With the `filter` attribute, you can specify object filters based on the object * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### exiting_object_replication +### existing_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -``` +```terraform existing_object_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ### delete_marker_replication -~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. 
+~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. ~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: -``` +```terraform delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. ### destination The `destination` object supports the following: -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. * `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. * `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). * `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). ### replication_time -``` +```terraform replication_time { status = "Enabled" time { @@ -303,12 +306,12 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. 
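Because `replication_time` and `metrics` must each be used in conjunction with the other, a combined `destination` sketch (the bucket reference and 15-minute values are illustrative) could look like:

```terraform
destination {
  bucket        = aws_s3_bucket.destination.arn
  storage_class = "STANDARD"

  # Replication Time Control and Destination Metrics are enabled together.
  replication_time {
    status = "Enabled"
    time {
      minutes = 15
    }
  }

  metrics {
    status = "Enabled"
    event_threshold {
      minutes = 15
    }
  }
}
```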
### metrics -``` +```terraform metrics { status = "Enabled" event_threshold { @@ -319,13 +322,14 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria The `source_selection_criteria` object supports the following: -``` + +```terraform source_selection_criteria { replica_modification { status = "Enabled" @@ -336,13 +340,14 @@ source_selection_criteria { } ``` + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. + * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. ### filter From 7735848b5ae4f19129a4a60cce3cd5fb4261b183 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:15:32 -0700 Subject: [PATCH 27/80] linting --- ...s3_bucket_replication_configuration.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index de512bcb34d..ec38b8c4d2d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -229,7 +229,7 @@ The `replication_configuration` resource supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -268,7 +268,7 @@ existing_object_replication { ~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -~> **NOTE:** This argument is only available with V2 replication configurations. +~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: @@ -281,7 +281,7 @@ delete_marker_replication { * `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. 
-### destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. @@ -306,7 +306,7 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. ### metrics @@ -322,7 +322,7 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria @@ -342,7 +342,7 @@ source_selection_criteria { ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` From 2411b0ee6e5c64b62786edf89ca909bcbe5ef261 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:41:01 -0700 Subject: [PATCH 28/80] linting/fmt --- aws/resource_aws_s3_bucket_replication_configuration.go | 2 +- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 581ac90653f..9fed71889cf 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -347,7 +347,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met r := replication.ReplicationConfiguration // set role if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", aws.StringValue(r.Role)) + d.Set("role", r.Role) } rules := make([]interface{}, 0, len(r.Rules)) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ec38b8c4d2d..c119ce2bcd0 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. 
See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference +## Attributes Reference The `replication_configuration` resource supports the following: From 64400e1596f7e4021841ee01c7ae7a5840584730 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 15:00:32 -0700 Subject: [PATCH 29/80] adding missing attribute reference to documentation --- .../r/s3_bucket_replication_configuration.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index c119ce2bcd0..eea1a86a844 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Attributes Reference +## Argument Reference The `replication_configuration` resource supports the following: @@ -357,6 +357,12 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* id - Resource id is the s3 source bucket name. + ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. 
From 6e7484558916c0a90b565c252759773af5c23ad8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:49:58 -0400 Subject: [PATCH 30/80] address linter-related errors --- ...3_bucket_replication_configuration_test.go | 14 ++++----- ...et_replication_configuration.html.markdown | 31 +++++++++---------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c5c6879cf80..d8274ed6cf2 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1480,8 +1480,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1631,8 +1631,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1706,8 +1706,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1715,7 +1715,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } - - `, rName, rNameDestination, rInt) } diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844..6c38b9703c0 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -114,9 +114,9 @@ resource "aws_s3_bucket" "source" { } } -aws_s3_bucket_replication_configuration replication { +resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id rules { id = "foobar" prefix = "foo" @@ -128,14 +128,13 @@ aws_s3_bucket_replication_configuration replication { } } } - ``` ### Bi-Directional Replication ```terraform -#... +# ... other configuration ... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -166,9 +165,9 @@ resource "aws_s3_bucket" "west" { } } -aws_s3_bucket_replication_configuration "east_to_west" { +resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn - bucket = aws_s3_bucket.east.id + bucket = aws_s3_bucket.east.id rules { id = "foobar" prefix = "foo" @@ -181,9 +180,9 @@ aws_s3_bucket_replication_configuration "east_to_west" { } } -aws_s3_bucket_replication_configuration "west_to_east" { +resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn - bucket = aws_s3_bucket.west.id + bucket = aws_s3_bucket.west.id rules { id = "foobar" prefix = "foo" @@ -201,9 +200,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. 
To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. -```terraform +```hcl lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +254,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +```hcl existing_object_replication { status = "Enabled" } @@ -272,7 +271,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +```hcl delete_marker_replication { status = "Enabled" } @@ -295,7 +294,7 @@ The `destination` object supports the following: ### replication_time -```terraform +```hcl replication_time { status = "Enabled" time { @@ -311,7 +310,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +```hcl metrics { status = "Enabled" event_threshold { @@ -329,7 +328,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +```hcl source_selection_criteria { replica_modification { status = "Enabled" @@ -357,7 +356,7 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attribute Reference +## Attributes Reference In addition to all arguments above, the following attributes are exported: From 71d8119187b60ca73e0c5b9a8af31ed76bd356cb Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:52:12 -0400 Subject: [PATCH 31/80] Update CHANGELOG for #20777 --- .changelog/20777.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/20777.txt diff --git a/.changelog/20777.txt b/.changelog/20777.txt new file mode 100644 index 00000000000..75e556fa77e --- /dev/null +++ b/.changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_replication_configuration +``` From eceb584d360ac3d5121f960dc9a61a4d2fcede0a Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 10:17:16 -0400 Subject: [PATCH 32/80] forgo syntax highlighting in short snippet code blocks in documentation --- ...bucket_replication_configuration.html.markdown | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 6c38b9703c0..877f4082b26 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,7 +133,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication ```terraform - # ... other configuration ... 
resource "aws_s3_bucket" "east" { @@ -202,7 +201,7 @@ resource "aws_s3_bucket_replication_configuration" "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. -```hcl +``` lifecycle { ignore_changes = [ replication_configuration @@ -254,7 +253,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```hcl +``` existing_object_replication { status = "Enabled" } @@ -271,7 +270,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```hcl +``` delete_marker_replication { status = "Enabled" } @@ -294,7 +293,7 @@ The `destination` object supports the following: ### replication_time -```hcl +``` replication_time { status = "Enabled" time { @@ -310,7 +309,7 @@ The `replication_time` object supports the following: ### metrics -```hcl +``` metrics { status = "Enabled" event_threshold { @@ -328,7 +327,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```hcl +``` source_selection_criteria { replica_modification { status = "Enabled" @@ -366,6 +365,6 @@ In addition to all arguments above, the following attributes are exported: S3 bucket replication configuration can be imported using the `bucket`, e.g. -``` +```sh $ terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` From f6839047e70c1eb06483216933e2849a0ea9d7c3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 23 Sep 2021 07:46:36 -0700 Subject: [PATCH 33/80] use untyped code blocks until new resource is merged to validate --- ...ucket_replication_configuration.html.markdown | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844..ee5ce245b7b 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -```terraform +``` provider "aws" { region = "eu-west-1" } @@ -133,7 +133,7 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -```terraform +``` #... @@ -203,7 +203,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
-```terraform +``` lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +255,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +``` existing_object_replication { status = "Enabled" } @@ -272,7 +272,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +``` delete_marker_replication { status = "Enabled" } @@ -295,7 +295,7 @@ The `destination` object supports the following: ### replication_time -```terraform +``` replication_time { status = "Enabled" time { @@ -311,7 +311,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +``` metrics { status = "Enabled" event_threshold { @@ -329,7 +329,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +``` source_selection_criteria { replica_modification { status = "Enabled" From c2e0724680426f8bc347eaebc7a57af6fbd48138 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 24 Sep 2021 10:40:10 -0700 Subject: [PATCH 34/80] Revert key renamed in error --- aws/resource_aws_s3_bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index df3e5b46000..097ccbfb045 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -2585,7 +2585,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From d4b7249663631a4ee92e666f942c698b1633b1f3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:35:32 -0700 Subject: [PATCH 35/80] Clean up stray merge conflict --- .../r/s3_bucket_replication_configuration.html.markdown | 6 ------ 1 file changed, 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index fc715c35bf1..8de6f01c14e 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -132,14 +132,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication -<<<<<<< HEAD ``` - -#... -======= -```terraform # ... other configuration ... 
->>>>>>> eceb584d360ac3d5121f960dc9a61a4d2fcede0a resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" From a87d031a674258b3b756edb4de45f198e777e29e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:36:06 -0700 Subject: [PATCH 36/80] Add logic for explicit delete Include delete logic for replication configuration Adding test for delete logic --- ...aws_s3_bucket_replication_configuration.go | 13 ++++ ...3_bucket_replication_configuration_test.go | 62 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 9fed71889cf..42cbce8d6ef 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -628,6 +628,19 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + + dbri := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(bucket), + } + + _, err := s3conn.DeleteBucketReplication(dbri) + if err != nil { + return fmt.Errorf("Error removing S3 bucket replication: %s", err) + } return nil } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index d8274ed6cf2..ff518137550 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -906,6 +906,68 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) }) } +func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + testDeleted := func(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[r] + if ok { + return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) + } + return nil + } + } + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSS3BucketReplicationConfigBasic(rInt), + Check: resource.ComposeTestCheckFunc(testDeleted(resourceName)), + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] From 84904b06ab7a43af20e97dd342b2f4b665d8d1d3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:55:05 -0700 Subject: [PATCH 37/80] init setup for replication configuration resource Blocking out general structure for new independent resource for managing the s3 bucket replication configuration settings Pulling over logic from resource s3 bucket to start with --- internal/service/s3/bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index f143f556cb3..5bc4db22b41 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -818,7 +818,7 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceBucketReplicationConfigurationUpdate(conn, d); err != nil { + if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn, d); err != nil { return err } } @@ -2033,7 +2033,7 @@ func resourceBucketObjectLockConfigurationUpdate(conn *s3.S3, d *schema.Resource return nil } -func resourceBucketReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { +func resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) From da4da50713b92527668383df4460d708b077ad86 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:57:33 -0700 Subject: [PATCH 38/80] adding new resource for replication configurations --- aws/provider.go | 1702 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1702 insertions(+) create mode 100644 aws/provider.go diff --git a/aws/provider.go b/aws/provider.go new file mode 100644 index 00000000000..5641b36adb4 --- /dev/null +++ b/aws/provider.go @@ -0,0 +1,1702 @@ +package aws + +import ( + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/mutexkv" +) + +// Provider returns a *schema.Provider. 
+func Provider() *schema.Provider { + // TODO: Move the validation to this, requires conditional schemas + // TODO: Move the configuration to this, requires validation + + // The actual provider + provider := &schema.Provider{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["access_key"], + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["secret_key"], + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["profile"], + }, + + "assume_role": assumeRoleSchema(), + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["shared_credentials_file"], + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["token"], + }, + + "region": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", + }, nil), + Description: descriptions["region"], + InputDefault: "us-east-1", // lintignore:AWSAT003 + }, + + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Default: 25, + Description: descriptions["max_retries"], + }, + + "allowed_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"forbidden_account_ids"}, + Set: schema.HashString, + }, + + "forbidden_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"allowed_account_ids"}, + Set: schema.HashString, + }, + + "default_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Configuration block with settings to default resource tags across all resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Resource tags to default across all resources", + }, + }, + }, + }, + + "endpoints": endpointsSchema(), + + "ignore_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Configuration block with settings to ignore resource tags across all resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "keys": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag keys to ignore across all resources.", + }, + "key_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag key prefixes to ignore across all resources.", + }, + }, + }, + }, + + "insecure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["insecure"], + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_credentials_validation"], + }, + + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_get_ec2_platforms"], + }, + + "skip_region_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_region_validation"], + }, + + "skip_requesting_account_id": { + Type: schema.TypeBool, + Optional: 
true, + Default: false, + Description: descriptions["skip_requesting_account_id"], + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_metadata_api_check"], + }, + + "s3_force_path_style": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["s3_force_path_style"], + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "aws_acm_certificate": dataSourceAwsAcmCertificate(), + "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), + "aws_acmpca_certificate": dataSourceAwsAcmpcaCertificate(), + "aws_ami": dataSourceAwsAmi(), + "aws_ami_ids": dataSourceAwsAmiIds(), + "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), + "aws_api_gateway_domain_name": dataSourceAwsApiGatewayDomainName(), + "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), + "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), + "aws_apigatewayv2_api": dataSourceAwsApiGatewayV2Api(), + "aws_apigatewayv2_apis": dataSourceAwsApiGatewayV2Apis(), + "aws_appmesh_mesh": dataSourceAwsAppmeshMesh(), + "aws_appmesh_virtual_service": dataSourceAwsAppmeshVirtualService(), + "aws_arn": dataSourceAwsArn(), + "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), + "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), + "aws_availability_zone": dataSourceAwsAvailabilityZone(), + "aws_availability_zones": dataSourceAwsAvailabilityZones(), + "aws_backup_plan": dataSourceAwsBackupPlan(), + "aws_backup_selection": dataSourceAwsBackupSelection(), + "aws_backup_vault": dataSourceAwsBackupVault(), + "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), + "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), + "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), + "aws_caller_identity": dataSourceAwsCallerIdentity(), + "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), + "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), + "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_cloudformation_type": dataSourceAwsCloudFormationType(), + "aws_cloudfront_cache_policy": dataSourceAwsCloudFrontCachePolicy(), + "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), + "aws_cloudfront_function": dataSourceAwsCloudFrontFunction(), + "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), + "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), + "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), + "aws_cloudwatch_event_connection": dataSourceAwsCloudwatchEventConnection(), + "aws_cloudwatch_event_source": dataSourceAwsCloudWatchEventSource(), + "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), + "aws_codeartifact_authorization_token": dataSourceAwsCodeArtifactAuthorizationToken(), + "aws_codeartifact_repository_endpoint": dataSourceAwsCodeArtifactRepositoryEndpoint(), + "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), + "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), + "aws_codestarconnections_connection": dataSourceAwsCodeStarConnectionsConnection(), + "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), + "aws_default_tags": dataSourceAwsDefaultTags(), + "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), + "aws_db_event_categories": dataSourceAwsDbEventCategories(), + "aws_db_instance": 
dataSourceAwsDbInstance(), + "aws_db_snapshot": dataSourceAwsDbSnapshot(), + "aws_db_subnet_group": dataSourceAwsDbSubnetGroup(), + "aws_directory_service_directory": dataSourceAwsDirectoryServiceDirectory(), + "aws_docdb_engine_version": dataSourceAwsDocdbEngineVersion(), + "aws_docdb_orderable_db_instance": dataSourceAwsDocdbOrderableDbInstance(), + "aws_dx_gateway": dataSourceAwsDxGateway(), + "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), + "aws_ebs_default_kms_key": dataSourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": dataSourceAwsEbsEncryptionByDefault(), + "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), + "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), + "aws_ebs_volume": dataSourceAwsEbsVolume(), + "aws_ebs_volumes": dataSourceAwsEbsVolumes(), + "aws_ec2_coip_pool": dataSourceAwsEc2CoipPool(), + "aws_ec2_coip_pools": dataSourceAwsEc2CoipPools(), + "aws_ec2_instance_type": dataSourceAwsEc2InstanceType(), + "aws_ec2_instance_type_offering": dataSourceAwsEc2InstanceTypeOffering(), + "aws_ec2_instance_type_offerings": dataSourceAwsEc2InstanceTypeOfferings(), + "aws_ec2_local_gateway": dataSourceAwsEc2LocalGateway(), + "aws_ec2_local_gateways": dataSourceAwsEc2LocalGateways(), + "aws_ec2_local_gateway_route_table": dataSourceAwsEc2LocalGatewayRouteTable(), + "aws_ec2_local_gateway_route_tables": dataSourceAwsEc2LocalGatewayRouteTables(), + "aws_ec2_local_gateway_virtual_interface": dataSourceAwsEc2LocalGatewayVirtualInterface(), + "aws_ec2_local_gateway_virtual_interface_group": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroup(), + "aws_ec2_local_gateway_virtual_interface_groups": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroups(), + "aws_ec2_managed_prefix_list": dataSourceAwsEc2ManagedPrefixList(), + "aws_ec2_spot_price": dataSourceAwsEc2SpotPrice(), + "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), + "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), + "aws_ec2_transit_gateway_peering_attachment": dataSourceAwsEc2TransitGatewayPeeringAttachment(), + "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), + "aws_ec2_transit_gateway_route_tables": dataSourceAwsEc2TransitGatewayRouteTables(), + "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), + "aws_ecr_authorization_token": dataSourceAwsEcrAuthorizationToken(), + "aws_ecr_image": dataSourceAwsEcrImage(), + "aws_ecr_repository": dataSourceAwsEcrRepository(), + "aws_ecs_cluster": dataSourceAwsEcsCluster(), + "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), + "aws_ecs_service": dataSourceAwsEcsService(), + "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), + "aws_customer_gateway": dataSourceAwsCustomerGateway(), + "aws_efs_access_point": dataSourceAwsEfsAccessPoint(), + "aws_efs_access_points": dataSourceAwsEfsAccessPoints(), + "aws_efs_file_system": dataSourceAwsEfsFileSystem(), + "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), + "aws_eip": dataSourceAwsEip(), + "aws_eks_addon": dataSourceAwsEksAddon(), + "aws_eks_cluster": dataSourceAwsEksCluster(), + "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), + "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), + "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), + "aws_elastic_beanstalk_solution_stack": 
dataSourceAwsElasticBeanstalkSolutionStack(), + "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), + "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), + "aws_elasticache_user": dataSourceAwsElastiCacheUser(), + "aws_elasticsearch_domain": dataSourceAwsElasticSearchDomain(), + "aws_elb": dataSourceAwsElb(), + "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), + "aws_elb_service_account": dataSourceAwsElbServiceAccount(), + "aws_globalaccelerator_accelerator": dataSourceAwsGlobalAcceleratorAccelerator(), + "aws_glue_connection": dataSourceAwsGlueConnection(), + "aws_glue_data_catalog_encryption_settings": dataSourceAwsGlueDataCatalogEncryptionSettings(), + "aws_glue_script": dataSourceAwsGlueScript(), + "aws_guardduty_detector": dataSourceAwsGuarddutyDetector(), + "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), + "aws_iam_group": dataSourceAwsIAMGroup(), + "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), + "aws_iam_policy": dataSourceAwsIAMPolicy(), + "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), + "aws_iam_role": dataSourceAwsIAMRole(), + "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), + "aws_iam_session_context": dataSourceAwsIAMSessionContext(), + "aws_iam_user": dataSourceAwsIAMUser(), + "aws_identitystore_group": dataSourceAwsIdentityStoreGroup(), + "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), + "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), + "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": dataSourceAwsImageBuilderImage(), + "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), + "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), + "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), + "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), + "aws_instance": dataSourceAwsInstance(), + "aws_instances": dataSourceAwsInstances(), + "aws_internet_gateway": dataSourceAwsInternetGateway(), + "aws_iot_endpoint": dataSourceAwsIotEndpoint(), + "aws_ip_ranges": dataSourceAwsIPRanges(), + "aws_kinesis_stream": dataSourceAwsKinesisStream(), + "aws_kinesis_stream_consumer": dataSourceAwsKinesisStreamConsumer(), + "aws_kms_alias": dataSourceAwsKmsAlias(), + "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), + "aws_kms_key": dataSourceAwsKmsKey(), + "aws_kms_public_key": dataSourceAwsKmsPublicKey(), + "aws_kms_secret": dataSourceAwsKmsSecret(), + "aws_kms_secrets": dataSourceAwsKmsSecrets(), + "aws_lakeformation_data_lake_settings": dataSourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_permissions": dataSourceAwsLakeFormationPermissions(), + "aws_lakeformation_resource": dataSourceAwsLakeFormationResource(), + "aws_lambda_alias": dataSourceAwsLambdaAlias(), + "aws_lambda_code_signing_config": dataSourceAwsLambdaCodeSigningConfig(), + "aws_lambda_function": dataSourceAwsLambdaFunction(), + "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), + "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), + "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), + "aws_launch_template": dataSourceAwsLaunchTemplate(), + "aws_lex_bot_alias": dataSourceAwsLexBotAlias(), + "aws_lex_bot": dataSourceAwsLexBot(), + "aws_lex_intent": dataSourceAwsLexIntent(), + "aws_lex_slot_type": dataSourceAwsLexSlotType(), + "aws_mq_broker": dataSourceAwsMqBroker(), + 
"aws_msk_cluster": dataSourceAwsMskCluster(), + "aws_msk_configuration": dataSourceAwsMskConfiguration(), + "aws_nat_gateway": dataSourceAwsNatGateway(), + "aws_neptune_orderable_db_instance": dataSourceAwsNeptuneOrderableDbInstance(), + "aws_neptune_engine_version": dataSourceAwsNeptuneEngineVersion(), + "aws_network_acls": dataSourceAwsNetworkAcls(), + "aws_network_interface": dataSourceAwsNetworkInterface(), + "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), + "aws_organizations_delegated_administrators": dataSourceAwsOrganizationsDelegatedAdministrators(), + "aws_organizations_delegated_services": dataSourceAwsOrganizationsDelegatedServices(), + "aws_organizations_organization": dataSourceAwsOrganizationsOrganization(), + "aws_organizations_organizational_units": dataSourceAwsOrganizationsOrganizationalUnits(), + "aws_outposts_outpost": dataSourceAwsOutpostsOutpost(), + "aws_outposts_outpost_instance_type": dataSourceAwsOutpostsOutpostInstanceType(), + "aws_outposts_outpost_instance_types": dataSourceAwsOutpostsOutpostInstanceTypes(), + "aws_outposts_outposts": dataSourceAwsOutpostsOutposts(), + "aws_outposts_site": dataSourceAwsOutpostsSite(), + "aws_outposts_sites": dataSourceAwsOutpostsSites(), + "aws_partition": dataSourceAwsPartition(), + "aws_prefix_list": dataSourceAwsPrefixList(), + "aws_pricing_product": dataSourceAwsPricingProduct(), + "aws_qldb_ledger": dataSourceAwsQLDBLedger(), + "aws_ram_resource_share": dataSourceAwsRamResourceShare(), + "aws_rds_certificate": dataSourceAwsRdsCertificate(), + "aws_rds_cluster": dataSourceAwsRdsCluster(), + "aws_rds_engine_version": dataSourceAwsRdsEngineVersion(), + "aws_rds_orderable_db_instance": dataSourceAwsRdsOrderableDbInstance(), + "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), + "aws_redshift_orderable_cluster": dataSourceAwsRedshiftOrderableCluster(), + "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), + "aws_region": dataSourceAwsRegion(), + "aws_regions": dataSourceAwsRegions(), + "aws_resourcegroupstaggingapi_resources": dataSourceAwsResourceGroupsTaggingAPIResources(), + "aws_route": dataSourceAwsRoute(), + "aws_route_table": dataSourceAwsRouteTable(), + "aws_route_tables": dataSourceAwsRouteTables(), + "aws_route53_delegation_set": dataSourceAwsDelegationSet(), + "aws_route53_resolver_endpoint": dataSourceAwsRoute53ResolverEndpoint(), + "aws_route53_resolver_rule": dataSourceAwsRoute53ResolverRule(), + "aws_route53_resolver_rules": dataSourceAwsRoute53ResolverRules(), + "aws_route53_zone": dataSourceAwsRoute53Zone(), + "aws_s3_bucket": dataSourceAwsS3Bucket(), + "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), + "aws_s3_bucket_objects": dataSourceAwsS3BucketObjects(), + "aws_sagemaker_prebuilt_ecr_image": dataSourceAwsSageMakerPrebuiltECRImage(), + "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), + "aws_secretsmanager_secret_rotation": dataSourceAwsSecretsManagerSecretRotation(), + "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), + "aws_servicecatalog_constraint": dataSourceAwsServiceCatalogConstraint(), + "aws_servicecatalog_launch_paths": dataSourceAwsServiceCatalogLaunchPaths(), + "aws_servicecatalog_portfolio_constraints": dataSourceAwsServiceCatalogPortfolioConstraints(), + "aws_servicecatalog_portfolio": dataSourceAwsServiceCatalogPortfolio(), + "aws_servicecatalog_product": dataSourceAwsServiceCatalogProduct(), + "aws_servicequotas_service": dataSourceAwsServiceQuotasService(), + "aws_servicequotas_service_quota": 
dataSourceAwsServiceQuotasServiceQuota(), + "aws_service_discovery_dns_namespace": dataSourceServiceDiscoveryDnsNamespace(), + "aws_sfn_activity": dataSourceAwsSfnActivity(), + "aws_sfn_state_machine": dataSourceAwsSfnStateMachine(), + "aws_signer_signing_job": dataSourceAwsSignerSigningJob(), + "aws_signer_signing_profile": dataSourceAwsSignerSigningProfile(), + "aws_sns_topic": dataSourceAwsSnsTopic(), + "aws_sqs_queue": dataSourceAwsSqsQueue(), + "aws_ssm_document": dataSourceAwsSsmDocument(), + "aws_ssm_parameter": dataSourceAwsSsmParameter(), + "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), + "aws_ssoadmin_instances": dataSourceAwsSsoAdminInstances(), + "aws_ssoadmin_permission_set": dataSourceAwsSsoAdminPermissionSet(), + "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), + "aws_subnet": dataSourceAwsSubnet(), + "aws_subnet_ids": dataSourceAwsSubnetIDs(), + "aws_transfer_server": dataSourceAwsTransferServer(), + "aws_vpcs": dataSourceAwsVpcs(), + "aws_security_group": dataSourceAwsSecurityGroup(), + "aws_security_groups": dataSourceAwsSecurityGroups(), + "aws_vpc": dataSourceAwsVpc(), + "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), + "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), + "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), + "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), + "aws_vpc_peering_connections": dataSourceAwsVpcPeeringConnections(), + "aws_vpn_gateway": dataSourceAwsVpnGateway(), + "aws_waf_ipset": dataSourceAwsWafIpSet(), + "aws_waf_rule": dataSourceAwsWafRule(), + "aws_waf_rate_based_rule": dataSourceAwsWafRateBasedRule(), + "aws_waf_web_acl": dataSourceAwsWafWebAcl(), + "aws_wafregional_ipset": dataSourceAwsWafRegionalIpSet(), + "aws_wafregional_rule": dataSourceAwsWafRegionalRule(), + "aws_wafregional_rate_based_rule": dataSourceAwsWafRegionalRateBasedRule(), + "aws_wafregional_web_acl": dataSourceAwsWafRegionalWebAcl(), + "aws_wafv2_ip_set": dataSourceAwsWafv2IPSet(), + "aws_wafv2_regex_pattern_set": dataSourceAwsWafv2RegexPatternSet(), + "aws_wafv2_rule_group": dataSourceAwsWafv2RuleGroup(), + "aws_wafv2_web_acl": dataSourceAwsWafv2WebACL(), + "aws_workspaces_bundle": dataSourceAwsWorkspacesBundle(), + "aws_workspaces_directory": dataSourceAwsWorkspacesDirectory(), + "aws_workspaces_image": dataSourceAwsWorkspacesImage(), + "aws_workspaces_workspace": dataSourceAwsWorkspacesWorkspace(), + + // Adding the Aliases for the ALB -> LB Rename + "aws_lb": dataSourceAwsLb(), + "aws_alb": dataSourceAwsLb(), + "aws_lb_listener": dataSourceAwsLbListener(), + "aws_alb_listener": dataSourceAwsLbListener(), + "aws_lb_target_group": dataSourceAwsLbTargetGroup(), + "aws_alb_target_group": dataSourceAwsLbTargetGroup(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "aws_accessanalyzer_analyzer": resourceAwsAccessAnalyzerAnalyzer(), + "aws_acm_certificate": resourceAwsAcmCertificate(), + "aws_acm_certificate_validation": resourceAwsAcmCertificateValidation(), + "aws_acmpca_certificate_authority": resourceAwsAcmpcaCertificateAuthority(), + "aws_acmpca_certificate_authority_certificate": resourceAwsAcmpcaCertificateAuthorityCertificate(), + "aws_acmpca_certificate": resourceAwsAcmpcaCertificate(), + "aws_ami": resourceAwsAmi(), + "aws_ami_copy": resourceAwsAmiCopy(), + "aws_ami_from_instance": resourceAwsAmiFromInstance(), + "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), + "aws_amplify_app": resourceAwsAmplifyApp(), + "aws_amplify_backend_environment": 
resourceAwsAmplifyBackendEnvironment(), + "aws_amplify_branch": resourceAwsAmplifyBranch(), + "aws_amplify_domain_association": resourceAwsAmplifyDomainAssociation(), + "aws_amplify_webhook": resourceAwsAmplifyWebhook(), + "aws_api_gateway_account": resourceAwsApiGatewayAccount(), + "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), + "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), + "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), + "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), + "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), + "aws_api_gateway_documentation_part": resourceAwsApiGatewayDocumentationPart(), + "aws_api_gateway_documentation_version": resourceAwsApiGatewayDocumentationVersion(), + "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), + "aws_api_gateway_gateway_response": resourceAwsApiGatewayGatewayResponse(), + "aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), + "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), + "aws_api_gateway_method": resourceAwsApiGatewayMethod(), + "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), + "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), + "aws_api_gateway_model": resourceAwsApiGatewayModel(), + "aws_api_gateway_request_validator": resourceAwsApiGatewayRequestValidator(), + "aws_api_gateway_resource": resourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), + "aws_api_gateway_rest_api_policy": resourceAwsApiGatewayRestApiPolicy(), + "aws_api_gateway_stage": resourceAwsApiGatewayStage(), + "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), + "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), + "aws_api_gateway_vpc_link": resourceAwsApiGatewayVpcLink(), + "aws_apigatewayv2_api": resourceAwsApiGatewayV2Api(), + "aws_apigatewayv2_api_mapping": resourceAwsApiGatewayV2ApiMapping(), + "aws_apigatewayv2_authorizer": resourceAwsApiGatewayV2Authorizer(), + "aws_apigatewayv2_deployment": resourceAwsApiGatewayV2Deployment(), + "aws_apigatewayv2_domain_name": resourceAwsApiGatewayV2DomainName(), + "aws_apigatewayv2_integration": resourceAwsApiGatewayV2Integration(), + "aws_apigatewayv2_integration_response": resourceAwsApiGatewayV2IntegrationResponse(), + "aws_apigatewayv2_model": resourceAwsApiGatewayV2Model(), + "aws_apigatewayv2_route": resourceAwsApiGatewayV2Route(), + "aws_apigatewayv2_route_response": resourceAwsApiGatewayV2RouteResponse(), + "aws_apigatewayv2_stage": resourceAwsApiGatewayV2Stage(), + "aws_apigatewayv2_vpc_link": resourceAwsApiGatewayV2VpcLink(), + "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), + "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), + "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), + "aws_appautoscaling_scheduled_action": resourceAwsAppautoscalingScheduledAction(), + "aws_appconfig_application": resourceAwsAppconfigApplication(), + "aws_appconfig_configuration_profile": resourceAwsAppconfigConfigurationProfile(), + "aws_appconfig_deployment": resourceAwsAppconfigDeployment(), + "aws_appconfig_deployment_strategy": resourceAwsAppconfigDeploymentStrategy(), + "aws_appconfig_environment": resourceAwsAppconfigEnvironment(), + "aws_appconfig_hosted_configuration_version": resourceAwsAppconfigHostedConfigurationVersion(), + "aws_appmesh_gateway_route": resourceAwsAppmeshGatewayRoute(), 
+ "aws_appmesh_mesh": resourceAwsAppmeshMesh(), + "aws_appmesh_route": resourceAwsAppmeshRoute(), + "aws_appmesh_virtual_gateway": resourceAwsAppmeshVirtualGateway(), + "aws_appmesh_virtual_node": resourceAwsAppmeshVirtualNode(), + "aws_appmesh_virtual_router": resourceAwsAppmeshVirtualRouter(), + "aws_appmesh_virtual_service": resourceAwsAppmeshVirtualService(), + "aws_apprunner_auto_scaling_configuration_version": resourceAwsAppRunnerAutoScalingConfigurationVersion(), + "aws_apprunner_connection": resourceAwsAppRunnerConnection(), + "aws_apprunner_custom_domain_association": resourceAwsAppRunnerCustomDomainAssociation(), + "aws_apprunner_service": resourceAwsAppRunnerService(), + "aws_appsync_api_key": resourceAwsAppsyncApiKey(), + "aws_appsync_datasource": resourceAwsAppsyncDatasource(), + "aws_appsync_function": resourceAwsAppsyncFunction(), + "aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(), + "aws_appsync_resolver": resourceAwsAppsyncResolver(), + "aws_athena_database": resourceAwsAthenaDatabase(), + "aws_athena_named_query": resourceAwsAthenaNamedQuery(), + "aws_athena_workgroup": resourceAwsAthenaWorkgroup(), + "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), + "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), + "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), + "aws_autoscalingplans_scaling_plan": resourceAwsAutoScalingPlansScalingPlan(), + "aws_backup_global_settings": resourceAwsBackupGlobalSettings(), + "aws_backup_plan": resourceAwsBackupPlan(), + "aws_backup_region_settings": resourceAwsBackupRegionSettings(), + "aws_backup_selection": resourceAwsBackupSelection(), + "aws_backup_vault": resourceAwsBackupVault(), + "aws_backup_vault_notifications": resourceAwsBackupVaultNotifications(), + "aws_backup_vault_policy": resourceAwsBackupVaultPolicy(), + "aws_budgets_budget": resourceAwsBudgetsBudget(), + "aws_budgets_budget_action": resourceAwsBudgetsBudgetAction(), + "aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(), + "aws_cloudformation_stack": resourceAwsCloudFormationStack(), + "aws_cloudformation_stack_set": resourceAwsCloudFormationStackSet(), + "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), + "aws_cloudformation_type": resourceAwsCloudFormationType(), + "aws_cloudfront_cache_policy": resourceAwsCloudFrontCachePolicy(), + "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), + "aws_cloudfront_function": resourceAwsCloudFrontFunction(), + "aws_cloudfront_key_group": resourceAwsCloudFrontKeyGroup(), + "aws_cloudfront_monitoring_subscription": resourceAwsCloudFrontMonitoringSubscription(), + "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), + "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), + "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), + "aws_cloudfront_realtime_log_config": resourceAwsCloudFrontRealtimeLogConfig(), + "aws_cloudtrail": resourceAwsCloudTrail(), + "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), + "aws_cloudwatch_event_bus_policy": resourceAwsCloudWatchEventBusPolicy(), + "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), + "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), + "aws_cloudwatch_event_target": 
resourceAwsCloudWatchEventTarget(), + "aws_cloudwatch_event_archive": resourceAwsCloudWatchEventArchive(), + "aws_cloudwatch_event_connection": resourceAwsCloudWatchEventConnection(), + "aws_cloudwatch_event_api_destination": resourceAwsCloudWatchEventApiDestination(), + "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), + "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), + "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), + "aws_cloudwatch_log_resource_policy": resourceAwsCloudWatchLogResourcePolicy(), + "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), + "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), + "aws_config_aggregate_authorization": resourceAwsConfigAggregateAuthorization(), + "aws_config_config_rule": resourceAwsConfigConfigRule(), + "aws_config_configuration_aggregator": resourceAwsConfigConfigurationAggregator(), + "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), + "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), + "aws_config_conformance_pack": resourceAwsConfigConformancePack(), + "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), + "aws_config_organization_conformance_pack": resourceAwsConfigOrganizationConformancePack(), + "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), + "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), + "aws_config_remediation_configuration": resourceAwsConfigRemediationConfiguration(), + "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), + "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), + "aws_cognito_identity_provider": resourceAwsCognitoIdentityProvider(), + "aws_cognito_resource_server": resourceAwsCognitoResourceServer(), + "aws_cognito_user_group": resourceAwsCognitoUserGroup(), + "aws_cognito_user_pool": resourceAwsCognitoUserPool(), + "aws_cognito_user_pool_client": resourceAwsCognitoUserPoolClient(), + "aws_cognito_user_pool_domain": resourceAwsCognitoUserPoolDomain(), + "aws_cognito_user_pool_ui_customization": resourceAwsCognitoUserPoolUICustomization(), + "aws_cloudhsm_v2_cluster": resourceAwsCloudHsmV2Cluster(), + "aws_cloudhsm_v2_hsm": resourceAwsCloudHsmV2Hsm(), + "aws_cloudwatch_composite_alarm": resourceAwsCloudWatchCompositeAlarm(), + "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), + "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), + "aws_cloudwatch_metric_stream": resourceAwsCloudWatchMetricStream(), + "aws_cloudwatch_query_definition": resourceAwsCloudWatchQueryDefinition(), + "aws_codedeploy_app": resourceAwsCodeDeployApp(), + "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), + "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), + "aws_codecommit_repository": resourceAwsCodeCommitRepository(), + "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), + "aws_codeartifact_domain": resourceAwsCodeArtifactDomain(), + "aws_codeartifact_domain_permissions_policy": resourceAwsCodeArtifactDomainPermissionsPolicy(), + "aws_codeartifact_repository": resourceAwsCodeArtifactRepository(), + "aws_codeartifact_repository_permissions_policy": resourceAwsCodeArtifactRepositoryPermissionsPolicy(), + "aws_codebuild_project": resourceAwsCodeBuildProject(), + 
"aws_codebuild_report_group": resourceAwsCodeBuildReportGroup(), + "aws_codebuild_source_credential": resourceAwsCodeBuildSourceCredential(), + "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), + "aws_codepipeline": resourceAwsCodePipeline(), + "aws_codepipeline_webhook": resourceAwsCodePipelineWebhook(), + "aws_codestarconnections_connection": resourceAwsCodeStarConnectionsConnection(), + "aws_codestarconnections_host": resourceAwsCodeStarConnectionsHost(), + "aws_codestarnotifications_notification_rule": resourceAwsCodeStarNotificationsNotificationRule(), + "aws_cur_report_definition": resourceAwsCurReportDefinition(), + "aws_customer_gateway": resourceAwsCustomerGateway(), + "aws_datapipeline_pipeline": resourceAwsDataPipelinePipeline(), + "aws_datasync_agent": resourceAwsDataSyncAgent(), + "aws_datasync_location_efs": resourceAwsDataSyncLocationEfs(), + "aws_datasync_location_fsx_windows_file_system": resourceAwsDataSyncLocationFsxWindowsFileSystem(), + "aws_datasync_location_nfs": resourceAwsDataSyncLocationNfs(), + "aws_datasync_location_s3": resourceAwsDataSyncLocationS3(), + "aws_datasync_location_smb": resourceAwsDataSyncLocationSmb(), + "aws_datasync_task": resourceAwsDataSyncTask(), + "aws_dax_cluster": resourceAwsDaxCluster(), + "aws_dax_parameter_group": resourceAwsDaxParameterGroup(), + "aws_dax_subnet_group": resourceAwsDaxSubnetGroup(), + "aws_db_cluster_snapshot": resourceAwsDbClusterSnapshot(), + "aws_db_event_subscription": resourceAwsDbEventSubscription(), + "aws_db_instance": resourceAwsDbInstance(), + "aws_db_instance_role_association": resourceAwsDbInstanceRoleAssociation(), + "aws_db_option_group": resourceAwsDbOptionGroup(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_proxy": resourceAwsDbProxy(), + "aws_db_proxy_default_target_group": resourceAwsDbProxyDefaultTargetGroup(), + "aws_db_proxy_endpoint": resourceAwsDbProxyEndpoint(), + "aws_db_proxy_target": resourceAwsDbProxyTarget(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_snapshot": resourceAwsDbSnapshot(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_devicefarm_project": resourceAwsDevicefarmProject(), + "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), + "aws_directory_service_conditional_forwarder": resourceAwsDirectoryServiceConditionalForwarder(), + "aws_directory_service_log_subscription": resourceAwsDirectoryServiceLogSubscription(), + "aws_dlm_lifecycle_policy": resourceAwsDlmLifecyclePolicy(), + "aws_dms_certificate": resourceAwsDmsCertificate(), + "aws_dms_endpoint": resourceAwsDmsEndpoint(), + "aws_dms_event_subscription": resourceAwsDmsEventSubscription(), + "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), + "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), + "aws_dms_replication_task": resourceAwsDmsReplicationTask(), + "aws_docdb_cluster": resourceAwsDocDBCluster(), + "aws_docdb_cluster_instance": resourceAwsDocDBClusterInstance(), + "aws_docdb_cluster_parameter_group": resourceAwsDocDBClusterParameterGroup(), + "aws_docdb_cluster_snapshot": resourceAwsDocDBClusterSnapshot(), + "aws_docdb_subnet_group": resourceAwsDocDBSubnetGroup(), + "aws_dx_bgp_peer": resourceAwsDxBgpPeer(), + "aws_dx_connection": resourceAwsDxConnection(), + "aws_dx_connection_association": resourceAwsDxConnectionAssociation(), + "aws_dx_gateway": resourceAwsDxGateway(), + "aws_dx_gateway_association": resourceAwsDxGatewayAssociation(), + "aws_dx_gateway_association_proposal": 
resourceAwsDxGatewayAssociationProposal(), + "aws_dx_hosted_private_virtual_interface": resourceAwsDxHostedPrivateVirtualInterface(), + "aws_dx_hosted_private_virtual_interface_accepter": resourceAwsDxHostedPrivateVirtualInterfaceAccepter(), + "aws_dx_hosted_public_virtual_interface": resourceAwsDxHostedPublicVirtualInterface(), + "aws_dx_hosted_public_virtual_interface_accepter": resourceAwsDxHostedPublicVirtualInterfaceAccepter(), + "aws_dx_hosted_transit_virtual_interface": resourceAwsDxHostedTransitVirtualInterface(), + "aws_dx_hosted_transit_virtual_interface_accepter": resourceAwsDxHostedTransitVirtualInterfaceAccepter(), + "aws_dx_lag": resourceAwsDxLag(), + "aws_dx_private_virtual_interface": resourceAwsDxPrivateVirtualInterface(), + "aws_dx_public_virtual_interface": resourceAwsDxPublicVirtualInterface(), + "aws_dx_transit_virtual_interface": resourceAwsDxTransitVirtualInterface(), + "aws_dynamodb_table": resourceAwsDynamoDbTable(), + "aws_dynamodb_table_item": resourceAwsDynamoDbTableItem(), + "aws_dynamodb_global_table": resourceAwsDynamoDbGlobalTable(), + "aws_dynamodb_kinesis_streaming_destination": resourceAwsDynamoDbKinesisStreamingDestination(), + "aws_ebs_default_kms_key": resourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": resourceAwsEbsEncryptionByDefault(), + "aws_ebs_snapshot": resourceAwsEbsSnapshot(), + "aws_ebs_snapshot_copy": resourceAwsEbsSnapshotCopy(), + "aws_ebs_snapshot_import": resourceAwsEbsSnapshotImport(), + "aws_ebs_volume": resourceAwsEbsVolume(), + "aws_ec2_availability_zone_group": resourceAwsEc2AvailabilityZoneGroup(), + "aws_ec2_capacity_reservation": resourceAwsEc2CapacityReservation(), + "aws_ec2_carrier_gateway": resourceAwsEc2CarrierGateway(), + "aws_ec2_client_vpn_authorization_rule": resourceAwsEc2ClientVpnAuthorizationRule(), + "aws_ec2_client_vpn_endpoint": resourceAwsEc2ClientVpnEndpoint(), + "aws_ec2_client_vpn_network_association": resourceAwsEc2ClientVpnNetworkAssociation(), + "aws_ec2_client_vpn_route": resourceAwsEc2ClientVpnRoute(), + "aws_ec2_fleet": resourceAwsEc2Fleet(), + "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), + "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), + "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), + "aws_ec2_tag": resourceAwsEc2Tag(), + "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), + "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), + "aws_ec2_traffic_mirror_target": resourceAwsEc2TrafficMirrorTarget(), + "aws_ec2_traffic_mirror_session": resourceAwsEc2TrafficMirrorSession(), + "aws_ec2_transit_gateway": resourceAwsEc2TransitGateway(), + "aws_ec2_transit_gateway_peering_attachment": resourceAwsEc2TransitGatewayPeeringAttachment(), + "aws_ec2_transit_gateway_peering_attachment_accepter": resourceAwsEc2TransitGatewayPeeringAttachmentAccepter(), + "aws_ec2_transit_gateway_prefix_list_reference": resourceAwsEc2TransitGatewayPrefixListReference(), + "aws_ec2_transit_gateway_route": resourceAwsEc2TransitGatewayRoute(), + "aws_ec2_transit_gateway_route_table": resourceAwsEc2TransitGatewayRouteTable(), + "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), + "aws_ec2_transit_gateway_route_table_propagation": resourceAwsEc2TransitGatewayRouteTablePropagation(), + "aws_ec2_transit_gateway_vpc_attachment": resourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpc_attachment_accepter": 
resourceAwsEc2TransitGatewayVpcAttachmentAccepter(), + "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), + "aws_ecrpublic_repository": resourceAwsEcrPublicRepository(), + "aws_ecr_registry_policy": resourceAwsEcrRegistryPolicy(), + "aws_ecr_replication_configuration": resourceAwsEcrReplicationConfiguration(), + "aws_ecr_repository": resourceAwsEcrRepository(), + "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), + "aws_ecs_capacity_provider": resourceAwsEcsCapacityProvider(), + "aws_ecs_cluster": resourceAwsEcsCluster(), + "aws_ecs_service": resourceAwsEcsService(), + "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), + "aws_efs_access_point": resourceAwsEfsAccessPoint(), + "aws_efs_backup_policy": resourceAwsEfsBackupPolicy(), + "aws_efs_file_system": resourceAwsEfsFileSystem(), + "aws_efs_file_system_policy": resourceAwsEfsFileSystemPolicy(), + "aws_efs_mount_target": resourceAwsEfsMountTarget(), + "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), + "aws_eip": resourceAwsEip(), + "aws_eip_association": resourceAwsEipAssociation(), + "aws_eks_cluster": resourceAwsEksCluster(), + "aws_eks_addon": resourceAwsEksAddon(), + "aws_eks_fargate_profile": resourceAwsEksFargateProfile(), + "aws_eks_identity_provider_config": resourceAwsEksIdentityProviderConfig(), + "aws_eks_node_group": resourceAwsEksNodeGroup(), + "aws_elasticache_cluster": resourceAwsElasticacheCluster(), + "aws_elasticache_global_replication_group": resourceAwsElasticacheGlobalReplicationGroup(), + "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), + "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), + "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), + "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), + "aws_elasticache_user": resourceAwsElasticacheUser(), + "aws_elasticache_user_group": resourceAwsElasticacheUserGroup(), + "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), + "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), + "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), + "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), + "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), + "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), + "aws_elasticsearch_domain_saml_options": resourceAwsElasticSearchDomainSAMLOptions(), + "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), + "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), + "aws_elb": resourceAwsElb(), + "aws_elb_attachment": resourceAwsElbAttachment(), + "aws_emr_cluster": resourceAwsEMRCluster(), + "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), + "aws_emr_instance_fleet": resourceAwsEMRInstanceFleet(), + "aws_emr_managed_scaling_policy": resourceAwsEMRManagedScalingPolicy(), + "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), + "aws_flow_log": resourceAwsFlowLog(), + "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), + "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), + "aws_fms_admin_account": resourceAwsFmsAdminAccount(), + "aws_fms_policy": resourceAwsFmsPolicy(), + "aws_gamelift_alias": resourceAwsGameliftAlias(), + "aws_gamelift_build": resourceAwsGameliftBuild(), + "aws_gamelift_fleet": resourceAwsGameliftFleet(), + 
"aws_gamelift_game_session_queue": resourceAwsGameliftGameSessionQueue(), + "aws_glacier_vault": resourceAwsGlacierVault(), + "aws_glacier_vault_lock": resourceAwsGlacierVaultLock(), + "aws_globalaccelerator_accelerator": resourceAwsGlobalAcceleratorAccelerator(), + "aws_globalaccelerator_endpoint_group": resourceAwsGlobalAcceleratorEndpointGroup(), + "aws_globalaccelerator_listener": resourceAwsGlobalAcceleratorListener(), + "aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(), + "aws_glue_catalog_table": resourceAwsGlueCatalogTable(), + "aws_glue_classifier": resourceAwsGlueClassifier(), + "aws_glue_connection": resourceAwsGlueConnection(), + "aws_glue_dev_endpoint": resourceAwsGlueDevEndpoint(), + "aws_glue_crawler": resourceAwsGlueCrawler(), + "aws_glue_data_catalog_encryption_settings": resourceAwsGlueDataCatalogEncryptionSettings(), + "aws_glue_job": resourceAwsGlueJob(), + "aws_glue_ml_transform": resourceAwsGlueMLTransform(), + "aws_glue_partition": resourceAwsGluePartition(), + "aws_glue_registry": resourceAwsGlueRegistry(), + "aws_glue_resource_policy": resourceAwsGlueResourcePolicy(), + "aws_glue_schema": resourceAwsGlueSchema(), + "aws_glue_security_configuration": resourceAwsGlueSecurityConfiguration(), + "aws_glue_trigger": resourceAwsGlueTrigger(), + "aws_glue_user_defined_function": resourceAwsGlueUserDefinedFunction(), + "aws_glue_workflow": resourceAwsGlueWorkflow(), + "aws_guardduty_detector": resourceAwsGuardDutyDetector(), + "aws_guardduty_filter": resourceAwsGuardDutyFilter(), + "aws_guardduty_invite_accepter": resourceAwsGuardDutyInviteAccepter(), + "aws_guardduty_ipset": resourceAwsGuardDutyIpset(), + "aws_guardduty_member": resourceAwsGuardDutyMember(), + "aws_guardduty_organization_admin_account": resourceAwsGuardDutyOrganizationAdminAccount(), + "aws_guardduty_organization_configuration": resourceAwsGuardDutyOrganizationConfiguration(), + "aws_guardduty_publishing_destination": resourceAwsGuardDutyPublishingDestination(), + "aws_guardduty_threatintelset": resourceAwsGuardDutyThreatintelset(), + "aws_iam_access_key": resourceAwsIamAccessKey(), + "aws_iam_account_alias": resourceAwsIamAccountAlias(), + "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), + "aws_iam_group_policy": resourceAwsIamGroupPolicy(), + "aws_iam_group": resourceAwsIamGroup(), + "aws_iam_group_membership": resourceAwsIamGroupMembership(), + "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), + "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), + "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), + "aws_iam_policy": resourceAwsIamPolicy(), + "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), + "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), + "aws_iam_role_policy": resourceAwsIamRolePolicy(), + "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), + "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), + "aws_iam_service_linked_role": resourceAwsIamServiceLinkedRole(), + "aws_iam_user_group_membership": resourceAwsIamUserGroupMembership(), + "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), + "aws_iam_user_policy": resourceAwsIamUserPolicy(), + "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), + "aws_iam_user": resourceAwsIamUser(), + "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), + "aws_imagebuilder_component": resourceAwsImageBuilderComponent(), + 
"aws_imagebuilder_distribution_configuration": resourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": resourceAwsImageBuilderImage(), + "aws_imagebuilder_image_pipeline": resourceAwsImageBuilderImagePipeline(), + "aws_imagebuilder_image_recipe": resourceAwsImageBuilderImageRecipe(), + "aws_imagebuilder_infrastructure_configuration": resourceAwsImageBuilderInfrastructureConfiguration(), + "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), + "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), + "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_iot_certificate": resourceAwsIotCertificate(), + "aws_iot_policy": resourceAwsIotPolicy(), + "aws_iot_policy_attachment": resourceAwsIotPolicyAttachment(), + "aws_iot_thing": resourceAwsIotThing(), + "aws_iot_thing_principal_attachment": resourceAwsIotThingPrincipalAttachment(), + "aws_iot_thing_type": resourceAwsIotThingType(), + "aws_iot_topic_rule": resourceAwsIotTopicRule(), + "aws_iot_role_alias": resourceAwsIotRoleAlias(), + "aws_key_pair": resourceAwsKeyPair(), + "aws_kinesis_analytics_application": resourceAwsKinesisAnalyticsApplication(), + "aws_kinesisanalyticsv2_application": resourceAwsKinesisAnalyticsV2Application(), + "aws_kinesisanalyticsv2_application_snapshot": resourceAwsKinesisAnalyticsV2ApplicationSnapshot(), + "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), + "aws_kinesis_stream": resourceAwsKinesisStream(), + "aws_kinesis_stream_consumer": resourceAwsKinesisStreamConsumer(), + "aws_kinesis_video_stream": resourceAwsKinesisVideoStream(), + "aws_kms_alias": resourceAwsKmsAlias(), + "aws_kms_external_key": resourceAwsKmsExternalKey(), + "aws_kms_grant": resourceAwsKmsGrant(), + "aws_kms_key": resourceAwsKmsKey(), + "aws_kms_ciphertext": resourceAwsKmsCiphertext(), + "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_permissions": resourceAwsLakeFormationPermissions(), + "aws_lakeformation_resource": resourceAwsLakeFormationResource(), + "aws_lambda_alias": resourceAwsLambdaAlias(), + "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), + "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), + "aws_lambda_function_event_invoke_config": resourceAwsLambdaFunctionEventInvokeConfig(), + "aws_lambda_function": resourceAwsLambdaFunction(), + "aws_lambda_layer_version": resourceAwsLambdaLayerVersion(), + "aws_lambda_permission": resourceAwsLambdaPermission(), + "aws_lambda_provisioned_concurrency_config": resourceAwsLambdaProvisionedConcurrencyConfig(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_launch_template": resourceAwsLaunchTemplate(), + "aws_lex_bot": resourceAwsLexBot(), + "aws_lex_bot_alias": resourceAwsLexBotAlias(), + "aws_lex_intent": resourceAwsLexIntent(), + "aws_lex_slot_type": resourceAwsLexSlotType(), + "aws_licensemanager_association": resourceAwsLicenseManagerAssociation(), + "aws_licensemanager_license_configuration": resourceAwsLicenseManagerLicenseConfiguration(), + "aws_lightsail_domain": resourceAwsLightsailDomain(), + "aws_lightsail_instance": resourceAwsLightsailInstance(), + "aws_lightsail_instance_public_ports": resourceAwsLightsailInstancePublicPorts(), + "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), + "aws_lightsail_static_ip": 
resourceAwsLightsailStaticIp(), + "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), + "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), + "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), + "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), + "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), + "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), + "aws_macie2_account": resourceAwsMacie2Account(), + "aws_macie2_classification_job": resourceAwsMacie2ClassificationJob(), + "aws_macie2_custom_data_identifier": resourceAwsMacie2CustomDataIdentifier(), + "aws_macie2_findings_filter": resourceAwsMacie2FindingsFilter(), + "aws_macie2_invitation_accepter": resourceAwsMacie2InvitationAccepter(), + "aws_macie2_member": resourceAwsMacie2Member(), + "aws_macie2_organization_admin_account": resourceAwsMacie2OrganizationAdminAccount(), + "aws_macie_member_account_association": resourceAwsMacieMemberAccountAssociation(), + "aws_macie_s3_bucket_association": resourceAwsMacieS3BucketAssociation(), + "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), + "aws_mq_broker": resourceAwsMqBroker(), + "aws_mq_configuration": resourceAwsMqConfiguration(), + "aws_media_convert_queue": resourceAwsMediaConvertQueue(), + "aws_media_package_channel": resourceAwsMediaPackageChannel(), + "aws_media_store_container": resourceAwsMediaStoreContainer(), + "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), + "aws_msk_cluster": resourceAwsMskCluster(), + "aws_msk_configuration": resourceAwsMskConfiguration(), + "aws_msk_scram_secret_association": resourceAwsMskScramSecretAssociation(), + "aws_mwaa_environment": resourceAwsMwaaEnvironment(), + "aws_nat_gateway": resourceAwsNatGateway(), + "aws_network_acl": resourceAwsNetworkAcl(), + "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), + "aws_neptune_cluster": resourceAwsNeptuneCluster(), + "aws_neptune_cluster_endpoint": resourceAwsNeptuneClusterEndpoint(), + "aws_neptune_cluster_instance": resourceAwsNeptuneClusterInstance(), + "aws_neptune_cluster_parameter_group": resourceAwsNeptuneClusterParameterGroup(), + "aws_neptune_cluster_snapshot": resourceAwsNeptuneClusterSnapshot(), + "aws_neptune_event_subscription": resourceAwsNeptuneEventSubscription(), + "aws_neptune_parameter_group": resourceAwsNeptuneParameterGroup(), + "aws_neptune_subnet_group": resourceAwsNeptuneSubnetGroup(), + "aws_network_acl_rule": resourceAwsNetworkAclRule(), + "aws_network_interface": resourceAwsNetworkInterface(), + "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), + "aws_networkfirewall_firewall": resourceAwsNetworkFirewallFirewall(), + "aws_networkfirewall_firewall_policy": resourceAwsNetworkFirewallFirewallPolicy(), + "aws_networkfirewall_logging_configuration": resourceAwsNetworkFirewallLoggingConfiguration(), + "aws_networkfirewall_resource_policy": resourceAwsNetworkFirewallResourcePolicy(), + "aws_networkfirewall_rule_group": resourceAwsNetworkFirewallRuleGroup(), + "aws_opsworks_application": resourceAwsOpsworksApplication(), + "aws_opsworks_stack": resourceAwsOpsworksStack(), + "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), + "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), + "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), + "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), + 
"aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), + "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), + "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), + "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), + "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), + "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), + "aws_opsworks_instance": resourceAwsOpsworksInstance(), + "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), + "aws_opsworks_permission": resourceAwsOpsworksPermission(), + "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), + "aws_organizations_organization": resourceAwsOrganizationsOrganization(), + "aws_organizations_account": resourceAwsOrganizationsAccount(), + "aws_organizations_delegated_administrator": resourceAwsOrganizationsDelegatedAdministrator(), + "aws_organizations_policy": resourceAwsOrganizationsPolicy(), + "aws_organizations_policy_attachment": resourceAwsOrganizationsPolicyAttachment(), + "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), + "aws_placement_group": resourceAwsPlacementGroup(), + "aws_prometheus_workspace": resourceAwsPrometheusWorkspace(), + "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), + "aws_qldb_ledger": resourceAwsQLDBLedger(), + "aws_quicksight_group": resourceAwsQuickSightGroup(), + "aws_quicksight_user": resourceAwsQuickSightUser(), + "aws_ram_principal_association": resourceAwsRamPrincipalAssociation(), + "aws_ram_resource_association": resourceAwsRamResourceAssociation(), + "aws_ram_resource_share": resourceAwsRamResourceShare(), + "aws_ram_resource_share_accepter": resourceAwsRamResourceShareAccepter(), + "aws_rds_cluster": resourceAwsRDSCluster(), + "aws_rds_cluster_endpoint": resourceAwsRDSClusterEndpoint(), + "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), + "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), + "aws_rds_cluster_role_association": resourceAwsRDSClusterRoleAssociation(), + "aws_rds_global_cluster": resourceAwsRDSGlobalCluster(), + "aws_redshift_cluster": resourceAwsRedshiftCluster(), + "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), + "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), + "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), + "aws_redshift_snapshot_copy_grant": resourceAwsRedshiftSnapshotCopyGrant(), + "aws_redshift_snapshot_schedule": resourceAwsRedshiftSnapshotSchedule(), + "aws_redshift_snapshot_schedule_association": resourceAwsRedshiftSnapshotScheduleAssociation(), + "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), + "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), + "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), + "aws_route53_hosted_zone_dnssec": resourceAwsRoute53HostedZoneDnssec(), + "aws_route53_key_signing_key": resourceAwsRoute53KeySigningKey(), + "aws_route53_query_log": resourceAwsRoute53QueryLog(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), + "aws_route53_vpc_association_authorization": resourceAwsRoute53VPCAssociationAuthorization(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route53_health_check": resourceAwsRoute53HealthCheck(), + "aws_route53_resolver_dnssec_config": resourceAwsRoute53ResolverDnssecConfig(), + "aws_route53_resolver_endpoint": resourceAwsRoute53ResolverEndpoint(), + 
"aws_route53_resolver_firewall_config": resourceAwsRoute53ResolverFirewallConfig(), + "aws_route53_resolver_firewall_domain_list": resourceAwsRoute53ResolverFirewallDomainList(), + "aws_route53_resolver_firewall_rule": resourceAwsRoute53ResolverFirewallRule(), + "aws_route53_resolver_firewall_rule_group": resourceAwsRoute53ResolverFirewallRuleGroup(), + "aws_route53_resolver_firewall_rule_group_association": resourceAwsRoute53ResolverFirewallRuleGroupAssociation(), + "aws_route53_resolver_query_log_config": resourceAwsRoute53ResolverQueryLogConfig(), + "aws_route53_resolver_query_log_config_association": resourceAwsRoute53ResolverQueryLogConfigAssociation(), + "aws_route53_resolver_rule_association": resourceAwsRoute53ResolverRuleAssociation(), + "aws_route53_resolver_rule": resourceAwsRoute53ResolverRule(), + "aws_route": resourceAwsRoute(), + "aws_route_table": resourceAwsRouteTable(), + "aws_default_route_table": resourceAwsDefaultRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_sagemaker_app": resourceAwsSagemakerApp(), + "aws_sagemaker_app_image_config": resourceAwsSagemakerAppImageConfig(), + "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), + "aws_sagemaker_domain": resourceAwsSagemakerDomain(), + "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), + "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), + "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), + "aws_sagemaker_image": resourceAwsSagemakerImage(), + "aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(), + "aws_sagemaker_model": resourceAwsSagemakerModel(), + "aws_sagemaker_model_package_group": resourceAwsSagemakerModelPackageGroup(), + "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), + "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), + "aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(), + "aws_sagemaker_workforce": resourceAwsSagemakerWorkforce(), + "aws_sagemaker_workteam": resourceAwsSagemakerWorkteam(), + "aws_schemas_discoverer": resourceAwsSchemasDiscoverer(), + "aws_schemas_registry": resourceAwsSchemasRegistry(), + "aws_schemas_schema": resourceAwsSchemasSchema(), + "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), + "aws_secretsmanager_secret_policy": resourceAwsSecretsManagerSecretPolicy(), + "aws_secretsmanager_secret_version": resourceAwsSecretsManagerSecretVersion(), + "aws_secretsmanager_secret_rotation": resourceAwsSecretsManagerSecretRotation(), + "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), + "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), + "aws_ses_domain_identity_verification": resourceAwsSesDomainIdentityVerification(), + "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), + "aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(), + "aws_ses_email_identity": resourceAwsSesEmailIdentity(), + "aws_ses_identity_policy": resourceAwsSesIdentityPolicy(), + "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), + "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), + "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), + "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), + "aws_ses_event_destination": resourceAwsSesEventDestination(), + "aws_ses_identity_notification_topic": resourceAwsSesNotificationTopic(), + "aws_ses_template": resourceAwsSesTemplate(), + "aws_s3_access_point": 
resourceAwsS3AccessPoint(), + "aws_s3_account_public_access_block": resourceAwsS3AccountPublicAccessBlock(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_s3_bucket_analytics_configuration": resourceAwsS3BucketAnalyticsConfiguration(), + "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), + "aws_s3_bucket_public_access_block": resourceAwsS3BucketPublicAccessBlock(), + "aws_s3_bucket_object": resourceAwsS3BucketObject(), + "aws_s3_bucket_ownership_controls": resourceAwsS3BucketOwnershipControls(), + "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), + "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), + "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), + "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), + "aws_s3_object_copy": resourceAwsS3ObjectCopy(), + "aws_s3control_bucket": resourceAwsS3ControlBucket(), + "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), + "aws_s3control_bucket_lifecycle_configuration": resourceAwsS3ControlBucketLifecycleConfiguration(), + "aws_s3outposts_endpoint": resourceAwsS3OutpostsEndpoint(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(), + "aws_default_security_group": resourceAwsDefaultSecurityGroup(), + "aws_security_group_rule": resourceAwsSecurityGroupRule(), + "aws_securityhub_account": resourceAwsSecurityHubAccount(), + "aws_securityhub_action_target": resourceAwsSecurityHubActionTarget(), + "aws_securityhub_insight": resourceAwsSecurityHubInsight(), + "aws_securityhub_invite_accepter": resourceAwsSecurityHubInviteAccepter(), + "aws_securityhub_member": resourceAwsSecurityHubMember(), + "aws_securityhub_organization_admin_account": resourceAwsSecurityHubOrganizationAdminAccount(), + "aws_securityhub_organization_configuration": resourceAwsSecurityHubOrganizationConfiguration(), + "aws_securityhub_product_subscription": resourceAwsSecurityHubProductSubscription(), + "aws_securityhub_standards_control": resourceAwsSecurityHubStandardsControl(), + "aws_securityhub_standards_subscription": resourceAwsSecurityHubStandardsSubscription(), + "aws_servicecatalog_budget_resource_association": resourceAwsServiceCatalogBudgetResourceAssociation(), + "aws_servicecatalog_constraint": resourceAwsServiceCatalogConstraint(), + "aws_servicecatalog_organizations_access": resourceAwsServiceCatalogOrganizationsAccess(), + "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), + "aws_servicecatalog_portfolio_share": resourceAwsServiceCatalogPortfolioShare(), + "aws_servicecatalog_product": resourceAwsServiceCatalogProduct(), + "aws_servicecatalog_provisioned_product": resourceAwsServiceCatalogProvisionedProduct(), + "aws_servicecatalog_service_action": resourceAwsServiceCatalogServiceAction(), + "aws_servicecatalog_tag_option": resourceAwsServiceCatalogTagOption(), + "aws_servicecatalog_tag_option_resource_association": resourceAwsServiceCatalogTagOptionResourceAssociation(), + "aws_servicecatalog_principal_portfolio_association": resourceAwsServiceCatalogPrincipalPortfolioAssociation(), + "aws_servicecatalog_product_portfolio_association": resourceAwsServiceCatalogProductPortfolioAssociation(), + "aws_servicecatalog_provisioning_artifact": resourceAwsServiceCatalogProvisioningArtifact(), + "aws_service_discovery_http_namespace": resourceAwsServiceDiscoveryHttpNamespace(), + "aws_service_discovery_private_dns_namespace": resourceAwsServiceDiscoveryPrivateDnsNamespace(), + 
"aws_service_discovery_public_dns_namespace": resourceAwsServiceDiscoveryPublicDnsNamespace(), + "aws_service_discovery_service": resourceAwsServiceDiscoveryService(), + "aws_servicequotas_service_quota": resourceAwsServiceQuotasServiceQuota(), + "aws_shield_protection": resourceAwsShieldProtection(), + "aws_signer_signing_job": resourceAwsSignerSigningJob(), + "aws_signer_signing_profile": resourceAwsSignerSigningProfile(), + "aws_signer_signing_profile_permission": resourceAwsSignerSigningProfilePermission(), + "aws_simpledb_domain": resourceAwsSimpleDBDomain(), + "aws_ssm_activation": resourceAwsSsmActivation(), + "aws_ssm_association": resourceAwsSsmAssociation(), + "aws_ssm_document": resourceAwsSsmDocument(), + "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), + "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), + "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), + "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), + "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), + "aws_ssm_parameter": resourceAwsSsmParameter(), + "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), + "aws_ssoadmin_account_assignment": resourceAwsSsoAdminAccountAssignment(), + "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), + "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), + "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), + "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), + "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), + "aws_storagegateway_file_system_association": resourceAwsStorageGatewayFileSystemAssociation(), + "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), + "aws_storagegateway_nfs_file_share": resourceAwsStorageGatewayNfsFileShare(), + "aws_storagegateway_smb_file_share": resourceAwsStorageGatewaySmbFileShare(), + "aws_storagegateway_stored_iscsi_volume": resourceAwsStorageGatewayStoredIscsiVolume(), + "aws_storagegateway_tape_pool": resourceAwsStorageGatewayTapePool(), + "aws_storagegateway_upload_buffer": resourceAwsStorageGatewayUploadBuffer(), + "aws_storagegateway_working_storage": resourceAwsStorageGatewayWorkingStorage(), + "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), + "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), + "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), + "aws_sqs_queue": resourceAwsSqsQueue(), + "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), + "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), + "aws_sns_platform_application": resourceAwsSnsPlatformApplication(), + "aws_sns_sms_preferences": resourceAwsSnsSmsPreferences(), + "aws_sns_topic": resourceAwsSnsTopic(), + "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), + "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), + "aws_sfn_activity": resourceAwsSfnActivity(), + "aws_sfn_state_machine": resourceAwsSfnStateMachine(), + "aws_default_subnet": resourceAwsDefaultSubnet(), + "aws_subnet": resourceAwsSubnet(), + "aws_swf_domain": resourceAwsSwfDomain(), + "aws_synthetics_canary": resourceAwsSyntheticsCanary(), + "aws_timestreamwrite_database": resourceAwsTimestreamWriteDatabase(), + "aws_timestreamwrite_table": resourceAwsTimestreamWriteTable(), + "aws_transfer_server": resourceAwsTransferServer(), + "aws_transfer_ssh_key": resourceAwsTransferSshKey(), + 
"aws_transfer_user": resourceAwsTransferUser(), + "aws_volume_attachment": resourceAwsVolumeAttachment(), + "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), + "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), + "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), + "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), + "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), + "aws_vpc_peering_connection_options": resourceAwsVpcPeeringConnectionOptions(), + "aws_default_vpc": resourceAwsDefaultVpc(), + "aws_vpc": resourceAwsVpc(), + "aws_vpc_endpoint": resourceAwsVpcEndpoint(), + "aws_vpc_endpoint_connection_notification": resourceAwsVpcEndpointConnectionNotification(), + "aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(), + "aws_vpc_endpoint_subnet_association": resourceAwsVpcEndpointSubnetAssociation(), + "aws_vpc_endpoint_service": resourceAwsVpcEndpointService(), + "aws_vpc_endpoint_service_allowed_principal": resourceAwsVpcEndpointServiceAllowedPrincipal(), + "aws_vpc_ipv4_cidr_block_association": resourceAwsVpcIpv4CidrBlockAssociation(), + "aws_vpn_connection": resourceAwsVpnConnection(), + "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), + "aws_vpn_gateway": resourceAwsVpnGateway(), + "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), + "aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(), + "aws_waf_byte_match_set": resourceAwsWafByteMatchSet(), + "aws_waf_ipset": resourceAwsWafIPSet(), + "aws_waf_rate_based_rule": resourceAwsWafRateBasedRule(), + "aws_waf_regex_match_set": resourceAwsWafRegexMatchSet(), + "aws_waf_regex_pattern_set": resourceAwsWafRegexPatternSet(), + "aws_waf_rule": resourceAwsWafRule(), + "aws_waf_rule_group": resourceAwsWafRuleGroup(), + "aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(), + "aws_waf_web_acl": resourceAwsWafWebAcl(), + "aws_waf_xss_match_set": resourceAwsWafXssMatchSet(), + "aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(), + "aws_waf_geo_match_set": resourceAwsWafGeoMatchSet(), + "aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(), + "aws_wafregional_geo_match_set": resourceAwsWafRegionalGeoMatchSet(), + "aws_wafregional_ipset": resourceAwsWafRegionalIPSet(), + "aws_wafregional_rate_based_rule": resourceAwsWafRegionalRateBasedRule(), + "aws_wafregional_regex_match_set": resourceAwsWafRegionalRegexMatchSet(), + "aws_wafregional_regex_pattern_set": resourceAwsWafRegionalRegexPatternSet(), + "aws_wafregional_rule": resourceAwsWafRegionalRule(), + "aws_wafregional_rule_group": resourceAwsWafRegionalRuleGroup(), + "aws_wafregional_size_constraint_set": resourceAwsWafRegionalSizeConstraintSet(), + "aws_wafregional_sql_injection_match_set": resourceAwsWafRegionalSqlInjectionMatchSet(), + "aws_wafregional_xss_match_set": resourceAwsWafRegionalXssMatchSet(), + "aws_wafregional_web_acl": resourceAwsWafRegionalWebAcl(), + "aws_wafregional_web_acl_association": resourceAwsWafRegionalWebAclAssociation(), + "aws_wafv2_ip_set": resourceAwsWafv2IPSet(), + "aws_wafv2_regex_pattern_set": resourceAwsWafv2RegexPatternSet(), + "aws_wafv2_rule_group": resourceAwsWafv2RuleGroup(), + "aws_wafv2_web_acl": resourceAwsWafv2WebACL(), + "aws_wafv2_web_acl_association": resourceAwsWafv2WebACLAssociation(), + "aws_wafv2_web_acl_logging_configuration": resourceAwsWafv2WebACLLoggingConfiguration(), + "aws_worklink_fleet": resourceAwsWorkLinkFleet(), + 
"aws_worklink_website_certificate_authority_association": resourceAwsWorkLinkWebsiteCertificateAuthorityAssociation(), + "aws_workspaces_directory": resourceAwsWorkspacesDirectory(), + "aws_workspaces_workspace": resourceAwsWorkspacesWorkspace(), + "aws_batch_compute_environment": resourceAwsBatchComputeEnvironment(), + "aws_batch_job_definition": resourceAwsBatchJobDefinition(), + "aws_batch_job_queue": resourceAwsBatchJobQueue(), + "aws_pinpoint_app": resourceAwsPinpointApp(), + "aws_pinpoint_adm_channel": resourceAwsPinpointADMChannel(), + "aws_pinpoint_apns_channel": resourceAwsPinpointAPNSChannel(), + "aws_pinpoint_apns_sandbox_channel": resourceAwsPinpointAPNSSandboxChannel(), + "aws_pinpoint_apns_voip_channel": resourceAwsPinpointAPNSVoipChannel(), + "aws_pinpoint_apns_voip_sandbox_channel": resourceAwsPinpointAPNSVoipSandboxChannel(), + "aws_pinpoint_baidu_channel": resourceAwsPinpointBaiduChannel(), + "aws_pinpoint_email_channel": resourceAwsPinpointEmailChannel(), + "aws_pinpoint_event_stream": resourceAwsPinpointEventStream(), + "aws_pinpoint_gcm_channel": resourceAwsPinpointGCMChannel(), + "aws_pinpoint_sms_channel": resourceAwsPinpointSMSChannel(), + "aws_xray_encryption_config": resourceAwsXrayEncryptionConfig(), + "aws_xray_group": resourceAwsXrayGroup(), + "aws_xray_sampling_rule": resourceAwsXraySamplingRule(), + "aws_workspaces_ip_group": resourceAwsWorkspacesIpGroup(), + + // ALBs are actually LBs because they can be type `network` or `application` + // To avoid regressions, we will add a new resource for each and they both point + // back to the old ALB version. IF the Terraform supported aliases for resources + // this would be a whole lot simpler + "aws_alb": resourceAwsLb(), + "aws_lb": resourceAwsLb(), + "aws_alb_listener": resourceAwsLbListener(), + "aws_lb_listener": resourceAwsLbListener(), + "aws_alb_listener_certificate": resourceAwsLbListenerCertificate(), + "aws_lb_listener_certificate": resourceAwsLbListenerCertificate(), + "aws_alb_listener_rule": resourceAwsLbbListenerRule(), + "aws_lb_listener_rule": resourceAwsLbbListenerRule(), + "aws_alb_target_group": resourceAwsLbTargetGroup(), + "aws_lb_target_group": resourceAwsLbTargetGroup(), + "aws_alb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), + "aws_lb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), + }, + } + + // Avoid Go formatting churn and Git conflicts + // You probably should not do this + provider.DataSourcesMap["aws_serverlessapplicationrepository_application"] = dataSourceAwsServerlessApplicationRepositoryApplication() + provider.ResourcesMap["aws_serverlessapplicationrepository_cloudformation_stack"] = resourceAwsServerlessApplicationRepositoryCloudFormationStack() + + provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { + terraformVersion := provider.TerraformVersion + if terraformVersion == "" { + // Terraform 0.12 introduced this field to the protocol + // We can therefore assume that if it's missing it's 0.10 or 0.11 + terraformVersion = "0.11+compatible" + } + return providerConfigure(d, terraformVersion) + } + + return provider +} + +var descriptions map[string]string +var endpointServiceNames []string + +func init() { + descriptions = map[string]string{ + "region": "The region where AWS operations will take place. Examples\n" + + "are us-east-1, us-west-2, etc.", // lintignore:AWSAT003 + + "access_key": "The access key for API operations. 
You can retrieve this\n" + + "from the 'Security & Credentials' section of the AWS console.", + + "secret_key": "The secret key for API operations. You can retrieve this\n" + + "from the 'Security & Credentials' section of the AWS console.", + + "profile": "The profile for API operations. If not set, the default profile\n" + + "created with `aws configure` will be used.", + + "shared_credentials_file": "The path to the shared credentials file. If not set\n" + + "this defaults to ~/.aws/credentials.", + + "token": "session token. A session token is only required if you are\n" + + "using temporary security credentials.", + + "max_retries": "The maximum number of times an AWS API request is\n" + + "being executed. If the API request still fails, an error is\n" + + "thrown.", + + "endpoint": "Use this to override the default service endpoint URL", + + "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + + "default value is `false`", + + "skip_credentials_validation": "Skip the credentials validation via STS API. " + + "Used for AWS API implementations that do not have STS available/implemented.", + + "skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " + + "Used by users that don't have ec2:DescribeAccountAttributes permissions.", + + "skip_region_validation": "Skip static validation of region name. " + + "Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).", + + "skip_requesting_account_id": "Skip requesting the account ID. " + + "Used for AWS API implementations that do not have IAM/STS API and/or metadata API.", + + "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + + "Used for AWS API implementations that do not have a metadata api endpoint.", + + "s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" + + "i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" + + "use virtual hosted bucket addressing when possible\n" + + "(http://BUCKET.s3.amazonaws.com/KEY). 
Specific to the Amazon S3 service.", + } + + endpointServiceNames = []string{ + "accessanalyzer", + "acm", + "acmpca", + "amplify", + "apigateway", + "appconfig", + "applicationautoscaling", + "applicationinsights", + "appmesh", + "apprunner", + "appstream", + "appsync", + "athena", + "auditmanager", + "autoscaling", + "autoscalingplans", + "backup", + "batch", + "budgets", + "chime", + "cloud9", + "cloudformation", + "cloudfront", + "cloudhsm", + "cloudsearch", + "cloudtrail", + "cloudwatch", + "cloudwatchevents", + "cloudwatchlogs", + "codeartifact", + "codebuild", + "codecommit", + "codedeploy", + "codepipeline", + "codestarconnections", + "cognitoidentity", + "cognitoidp", + "configservice", + "connect", + "cur", + "dataexchange", + "datapipeline", + "datasync", + "dax", + "detective", + "devicefarm", + "directconnect", + "dlm", + "dms", + "docdb", + "ds", + "dynamodb", + "ec2", + "ecr", + "ecrpublic", + "ecs", + "efs", + "eks", + "elasticache", + "elasticbeanstalk", + "elastictranscoder", + "elb", + "emr", + "emrcontainers", + "es", + "firehose", + "fms", + "forecast", + "fsx", + "gamelift", + "glacier", + "globalaccelerator", + "glue", + "greengrass", + "guardduty", + "iam", + "identitystore", + "imagebuilder", + "inspector", + "iot", + "iotanalytics", + "iotevents", + "kafka", + "kinesis", + "kinesisanalytics", + "kinesisanalyticsv2", + "kinesisvideo", + "kms", + "lakeformation", + "lambda", + "lexmodels", + "licensemanager", + "lightsail", + "location", + "macie", + "macie2", + "managedblockchain", + "marketplacecatalog", + "mediaconnect", + "mediaconvert", + "medialive", + "mediapackage", + "mediastore", + "mediastoredata", + "mq", + "mwaa", + "neptune", + "networkfirewall", + "networkmanager", + "opsworks", + "organizations", + "outposts", + "personalize", + "pinpoint", + "pricing", + "qldb", + "quicksight", + "ram", + "rds", + "redshift", + "resourcegroups", + "resourcegroupstaggingapi", + "route53", + "route53domains", + "route53resolver", + "s3", + "s3control", + "s3outposts", + "sagemaker", + "schemas", + "sdb", + "secretsmanager", + "securityhub", + "serverlessrepo", + "servicecatalog", + "servicediscovery", + "servicequotas", + "ses", + "shield", + "signer", + "sns", + "sqs", + "ssm", + "ssoadmin", + "stepfunctions", + "storagegateway", + "sts", + "swf", + "synthetics", + "timestreamwrite", + "transfer", + "waf", + "wafregional", + "wafv2", + "worklink", + "workmail", + "workspaces", + "xray", + } +} + +func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) { + config := Config{ + AccessKey: d.Get("access_key").(string), + SecretKey: d.Get("secret_key").(string), + Profile: d.Get("profile").(string), + Token: d.Get("token").(string), + Region: d.Get("region").(string), + CredsFilename: d.Get("shared_credentials_file").(string), + DefaultTagsConfig: expandProviderDefaultTags(d.Get("default_tags").([]interface{})), + Endpoints: make(map[string]string), + MaxRetries: d.Get("max_retries").(int), + IgnoreTagsConfig: expandProviderIgnoreTags(d.Get("ignore_tags").([]interface{})), + Insecure: d.Get("insecure").(bool), + SkipCredsValidation: d.Get("skip_credentials_validation").(bool), + SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), + SkipRegionValidation: d.Get("skip_region_validation").(bool), + SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), + SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), + S3ForcePathStyle: d.Get("s3_force_path_style").(bool), + terraformVersion: terraformVersion, + } + + 
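// When an assume_role block is configured, each non-empty attribute below is copied onto the Config so the client can perform an STS AssumeRole before other AWS API calls. +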
if l, ok := d.Get("assume_role").([]interface{}); ok && len(l) > 0 && l[0] != nil { + m := l[0].(map[string]interface{}) + + if v, ok := m["duration_seconds"].(int); ok && v != 0 { + config.AssumeRoleDurationSeconds = v + } + + if v, ok := m["external_id"].(string); ok && v != "" { + config.AssumeRoleExternalID = v + } + + if v, ok := m["policy"].(string); ok && v != "" { + config.AssumeRolePolicy = v + } + + if policyARNSet, ok := m["policy_arns"].(*schema.Set); ok && policyARNSet.Len() > 0 { + for _, policyARNRaw := range policyARNSet.List() { + policyARN, ok := policyARNRaw.(string) + + if !ok { + continue + } + + config.AssumeRolePolicyARNs = append(config.AssumeRolePolicyARNs, policyARN) + } + } + + if v, ok := m["role_arn"].(string); ok && v != "" { + config.AssumeRoleARN = v + } + + if v, ok := m["session_name"].(string); ok && v != "" { + config.AssumeRoleSessionName = v + } + + if tagMapRaw, ok := m["tags"].(map[string]interface{}); ok && len(tagMapRaw) > 0 { + config.AssumeRoleTags = make(map[string]string) + + for k, vRaw := range tagMapRaw { + v, ok := vRaw.(string) + + if !ok { + continue + } + + config.AssumeRoleTags[k] = v + } + } + + if transitiveTagKeySet, ok := m["transitive_tag_keys"].(*schema.Set); ok && transitiveTagKeySet.Len() > 0 { + for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() { + transitiveTagKey, ok := transitiveTagKeyRaw.(string) + + if !ok { + continue + } + + config.AssumeRoleTransitiveTagKeys = append(config.AssumeRoleTransitiveTagKeys, transitiveTagKey) + } + } + + log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q)", config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID) + } + + endpointsSet := d.Get("endpoints").(*schema.Set) + + for _, endpointsSetI := range endpointsSet.List() { + endpoints := endpointsSetI.(map[string]interface{}) + for _, endpointServiceName := range endpointServiceNames { + config.Endpoints[endpointServiceName] = endpoints[endpointServiceName].(string) + } + } + + if v, ok := d.GetOk("allowed_account_ids"); ok { + for _, accountIDRaw := range v.(*schema.Set).List() { + config.AllowedAccountIds = append(config.AllowedAccountIds, accountIDRaw.(string)) + } + } + + if v, ok := d.GetOk("forbidden_account_ids"); ok { + for _, accountIDRaw := range v.(*schema.Set).List() { + config.ForbiddenAccountIds = append(config.ForbiddenAccountIds, accountIDRaw.(string)) + } + } + + return config.Client() +} + +// This is a global MutexKV for use within this plugin. 
+var awsMutexKV = mutexkv.NewMutexKV() + +func assumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: "Seconds to restrict the assume role session duration.", + }, + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: "Unique identifier that might be required for assuming a role in another account.", + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + ValidateFunc: validation.StringIsJSON, + }, + "policy_arns": { + Type: schema.TypeSet, + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Amazon Resource Name of an IAM Role to assume prior to making API calls.", + ValidateFunc: validateArn, + }, + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "Identifier for the assumed role session.", + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Assume role session tags.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "transitive_tag_keys": { + Type: schema.TypeSet, + Optional: true, + Description: "Assume role session tag keys to pass to any subsequent sessions.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + } +} + +func endpointsSchema() *schema.Schema { + endpointsAttributes := make(map[string]*schema.Schema) + + for _, endpointServiceName := range endpointServiceNames { + endpointsAttributes[endpointServiceName] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["endpoint"], + } + } + + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: endpointsAttributes, + }, + } +} + +func expandProviderDefaultTags(l []interface{}) *keyvaluetags.DefaultConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + defaultConfig := &keyvaluetags.DefaultConfig{} + m := l[0].(map[string]interface{}) + + if v, ok := m["tags"].(map[string]interface{}); ok { + defaultConfig.Tags = keyvaluetags.New(v) + } + return defaultConfig +} + +func expandProviderIgnoreTags(l []interface{}) *keyvaluetags.IgnoreConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + ignoreConfig := &keyvaluetags.IgnoreConfig{} + m := l[0].(map[string]interface{}) + + if v, ok := m["keys"].(*schema.Set); ok { + ignoreConfig.Keys = keyvaluetags.New(v.List()) + } + + if v, ok := m["key_prefixes"].(*schema.Set); ok { + ignoreConfig.KeyPrefixes = keyvaluetags.New(v.List()) + } + + return ignoreConfig +} + +// ReverseDns switches a DNS hostname to reverse DNS and vice-versa. 
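+// For example, ReverseDns("com.amazonaws.us-east-1") returns "us-east-1.amazonaws.com", and applying it to the result restores the original hostname.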
+func ReverseDns(hostname string) string { + parts := strings.Split(hostname, ".") + + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + + return strings.Join(parts, ".") +} From 3bd0c6b7d0edcb9425d656ef6c95b95bffed05a1 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 11:35:47 -0700 Subject: [PATCH 39/80] cleanup and remove unneeded logic --- ...aws_s3_bucket_replication_configuration.go | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go new file mode 100644 index 00000000000..400a2f12c5b --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketReplicationConfigurationCreate, + Read: resourceAwsS3BucketReplicationConfigurationRead, + Update: resourceAwsS3BucketReplicationConfigurationUpdate, + Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"bucket_prefix"}, + ValidateFunc: validation.StringLenBetween(0, 63), + }, + "role": { + Type: schema.TypeString, + Required: true, + }, + "rules": { + Type: schema.TypeSet, + Required: true, + Set: rulesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + "destination": { + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsAccountId, + }, + "bucket": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, + "replica_kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + }, + }, + }, + "source_selection_criteria": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, 
+ }, + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tags": tagsSchema(), + }, + }, + }, + "delete_marker_replication_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + }, + }, + }, + }, + + "tags": tagsSchema(), + "tags_all": tagsSchemaComputed(), + }, + + CustomizeDiff: SetTagsDiff, + } +} + +func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + input := &s3.HeadBucketInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + _, err := s3conn.HeadBucket(input) + + if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + return resource.RetryableError(err) + } + + if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + // Read the bucket replication configuration + replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Get("bucket").(string)), + }) + }) + if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + return fmt.Errorf("error getting S3 Bucket replication: %s", err) + } + replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) + if !ok || replication == nil { + return fmt.Errorf("error reading replication_configuration") + } + + return nil +} + +func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + + return nil +} From e4676496628685e1d1949e17400474e8c14db4e1 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:14:09 -0700 Subject: [PATCH 40/80] WIP setup update processes --- ...aws_s3_bucket_replication_configuration.go | 152 ++++++++++++++++-- 1 file changed, 142 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 400a2f12c5b..74af667e458 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -23,12 
+26,11 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"bucket_prefix"}, - ValidateFunc: validation.StringLenBetween(0, 63), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 63), }, "role": { Type: schema.TypeString, @@ -164,10 +166,6 @@ func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, m return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } -func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) -} - func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn @@ -209,6 +207,140 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil } +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + rc := &s3.ReplicationConfiguration{} + if val, ok := d.GetOk("role"); ok { + rc.Role = aws.String(val.(string)) + } + + rcRules := d.Get("rules").(*schema.Set).List() + rules := []*s3.ReplicationRule{} + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rcRule := &s3.ReplicationRule{} + if status, ok := rr["status"]; ok && status != "" { + rcRule.Status = aws.String(status.(string)) + } else { + continue + } + + if rrid, ok := rr["id"]; ok && rrid != "" { + rcRule.ID = aws.String(rrid.(string)) + } + + ruleDestination := &s3.Destination{} + if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { + if dest[0] != nil { + bd := dest[0].(map[string]interface{}) + ruleDestination.Bucket = aws.String(bd["bucket"].(string)) + + if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { + ruleDestination.StorageClass = aws.String(storageClass.(string)) + } + + if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), + } + } + + if account, ok := bd["account_id"]; ok && account != "" { + ruleDestination.Account = aws.String(account.(string)) + } + + if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { + aclTranslationValues := aclTranslation[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + } + } + rcRule.Destination = ruleDestination + + if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { + if ssc[0] != nil { + sscValues := ssc[0].(map[string]interface{}) + ruleSsc := &s3.SourceSelectionCriteria{} + if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { + if sseKms[0] != nil { + sseKmsValues := sseKms[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = 
aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + } + rcRule.SourceSelectionCriteria = ruleSsc + } + } + + if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { + // XML schema V2. + rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) + rcRule.Filter = &s3.ReplicationRuleFilter{} + filter := f[0].(map[string]interface{}) + tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. + rcRule.Prefix = aws.String(rr["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + rc.Rules = rules + i := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, + } + log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := s3conn.PutBucketReplication(i) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketReplication(i) + } + if err != nil { + return fmt.Errorf("Error putting S3 replication configuration: %s", err) + } + + return nil + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { return nil From a445cff08284b72bf900f7479f9544cf4db29174 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:15:06 -0700 Subject: [PATCH 41/80] WIP pull in tests from s3 bucket resource --- ...3_bucket_replication_configuration_test.go | 1461 +++++++++++++++++ 1 file changed, 1461 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration_test.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go new file mode 100644 index 00000000000..ed348eb3c91 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -0,0 +1,1461 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "regexp" + "testing" +) + +func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + 
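// The steps below update a single replication rule in place: first with a STANDARD destination storage class, then GLACIER, then a variant that also replicates SSE-KMS encrypted objects. +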
PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "GLACIER"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassGlacier), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ 
+ "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "prefix3", + "filter.0.tags.%": "1", + "filter.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { + // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + 
}, + }) +} + +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: 
aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + +// StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 +func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { + 
rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { + rInt := acctest.RandInt() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), + ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), + }, + }, + }) +} + +// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 +func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func 
TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + }, + }, + }, + Priority: aws.Int64(42), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String("foo"), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + }, + }, + }, + Priority: aws.Int64(41), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + 
testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + { + Key: aws.String("Foo"), + Value: aws.String("Bar"), + }, + }, + }, + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationResourceName := "aws_s3_bucket.destination" + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExists(destinationResourceName), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + +func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { + return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = "tf-iam-role-replication-%[1]d" + + assume_role_policy = < Date: Thu, 19 Aug 2021 11:04:42 -0700 Subject: [PATCH 42/80] WIP ensure create/read/update logic is operational --- ...aws_s3_bucket_replication_configuration.go | 127 +++++++++++++++++- ...3_bucket_replication_configuration_test.go | 33 ++++- 2 files changed, 153 
insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 74af667e458..4f8cc098c2a 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,8 +2,8 @@ package aws import ( "fmt" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" + "net/http" "time" "github.com/aws/aws-sdk-go/aws" @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3BucketReplicationConfigurationCreate, + Create: resourceAwsS3BucketReplicationConfigurationPut, Read: resourceAwsS3BucketReplicationConfigurationRead, Update: resourceAwsS3BucketReplicationConfigurationUpdate, Delete: resourceAwsS3BucketReplicationConfigurationDelete, @@ -162,7 +164,16 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { } } -func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { +func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error { + // Get the bucket + var bucket string + if v, ok := d.GetOk("bucket"); ok { + bucket = v.(string) + } else { + // fail, can't do anything without a bucket + } + d.SetId(bucket) + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } @@ -176,7 +187,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) - if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { return resource.RetryableError(err) } @@ -190,6 +201,29 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil }) + + if tfresource.TimedOut(err) { + _, err = s3conn.HeadBucket(input) + } + + if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) + } + + if _, ok := d.GetOk("bucket"); !ok { + d.Set("bucket", d.Id()) + } + // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ @@ -203,6 +237,90 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if !ok || replication == nil { return fmt.Errorf("error reading replication_configuration") } + r := replication.ReplicationConfiguration + // set role + if r.Role != nil && aws.StringValue(r.Role) != "" { + d.Set("role", aws.StringValue(r.Role)) + } + + // set rules, these 
need to be flattened + rules := make([]interface{}, 0, len(r.Rules)) + for _, v := range r.Rules { + t := make(map[string]interface{}) + if v.Destination != nil { + rd := make(map[string]interface{}) + if v.Destination.Bucket != nil { + rd["bucket"] = aws.StringValue(v.Destination.Bucket) + } + if v.Destination.StorageClass != nil { + rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) + } + if v.Destination.EncryptionConfiguration != nil { + if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { + rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) + } + } + if v.Destination.Account != nil { + rd["account_id"] = aws.StringValue(v.Destination.Account) + } + if v.Destination.AccessControlTranslation != nil { + rdt := map[string]interface{}{ + "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), + } + rd["access_control_translation"] = []interface{}{rdt} + } + t["destination"] = []interface{}{rd} + } + + if v.ID != nil { + t["id"] = aws.StringValue(v.ID) + } + if v.Prefix != nil { + t["prefix"] = aws.StringValue(v.Prefix) + } + if v.Status != nil { + t["status"] = aws.StringValue(v.Status) + } + if vssc := v.SourceSelectionCriteria; vssc != nil { + tssc := make(map[string]interface{}) + if vssc.SseKmsEncryptedObjects != nil { + tSseKms := make(map[string]interface{}) + if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { + tSseKms["enabled"] = true + } else if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { + tSseKms["enabled"] = false + } + tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} + } + t["source_selection_criteria"] = []interface{}{tssc} + } + + if v.Priority != nil { + t["priority"] = int(aws.Int64Value(v.Priority)) + } + + if f := v.Filter; f != nil { + m := map[string]interface{}{} + if f.Prefix != nil { + m["prefix"] = aws.StringValue(f.Prefix) + } + if t := f.Tag; t != nil { + m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + } + if a := f.And; a != nil { + m["prefix"] = aws.StringValue(a.Prefix) + m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + } + t["filter"] = []interface{}{m} + + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { + t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + } + } + + rules = append(rules, t) + } + d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } @@ -337,7 +455,6 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m return fmt.Errorf("Error putting S3 replication configuration: %s", err) } - return nil return resourceAwsS3BucketReplicationConfigurationRead(d, meta) } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index ed348eb3c91..c12697906f4 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,15 +2,44 @@ package aws import ( "fmt" + "regexp" + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
"regexp" - "testing" ) +func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { + rInt := acctest.RandInt() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + ), + }, + }, + }) +} + func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() From 9b24010250a7f2afb6cbe1819871ed49f7110e60 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 19 Aug 2021 14:05:53 -0700 Subject: [PATCH 43/80] basic tests passing --- ...aws_s3_bucket_replication_configuration.go | 5 - ...3_bucket_replication_configuration_test.go | 92 +++++-------------- 2 files changed, 23 insertions(+), 74 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4f8cc098c2a..5742d810d4b 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -155,12 +155,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, - - "tags": tagsSchema(), - "tags_all": tagsSchemaComputed(), }, - - CustomizeDiff: SetTagsDiff, } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c12697906f4..f872ab698f1 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -12,34 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { - rInt := acctest.RandInt() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - ), - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() @@ 
-902,6 +874,10 @@ resource "aws_s3_bucket" "destination" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "source" { @@ -910,6 +886,10 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } `, randInt) } @@ -945,6 +925,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -954,6 +937,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1014,6 +1000,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -1023,6 +1012,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1094,6 +1086,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1316,47 +1311,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { `) } -func testAccAWSS3BucketReplicationConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination string) string { - return composeConfig(testAccAWSS3BucketReplicationConfig_iamPolicy(rName), fmt.Sprintf(` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = %[1]q - acl = "private" - - versioning { - enabled = true - } - - replication_configuration { - role = aws_iam_role.test.arn - - rules { - id = "testid" - status = "Enabled" - - filter { - prefix = "testprefix" - } - - delete_marker_replication_status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } - } -} - -resource "aws_s3_bucket_replication_configuration" "destination" { - bucket = %[2]q - - versioning { - enabled = true - } -} -`, rName, rNameDestination)) -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { From 94cc6190ac8d4cb63b3085968e13c29efd48e379 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 15:27:31 -0700 Subject: [PATCH 44/80] Update expected resource names Rename resource names to reflect new position in configuration scope of the independent resource. 
Use literal strings instead of fmt.Sprintf in HCL concatenation
---
 ...3_bucket_replication_configuration_test.go | 169 +++++++-----------
 1 file changed, 66 insertions(+), 103 deletions(-)

diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go
index f872ab698f1..4ac4e078867 100644
--- a/aws/resource_aws_s3_bucket_replication_configuration_test.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go
@@ -2,7 +2,6 @@ package aws
 
 import (
 	"fmt"
-	"regexp"
 	"testing"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -109,7 +108,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test
 	rInt := acctest.RandInt()
 	alternateRegion := testAccGetAlternateRegion()
 	region := testAccGetRegion()
-	resourceName := "aws_s3_bucket.bucket"
+	resourceName := "aws_s3_bucket_replication_configuration.replication"
 
 	// record the initialized providers so that we can use them to check for the instances in each region
 	var providers []*schema.Provider
@@ -130,9 +129,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test
 				testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)),
 				testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)),
 				testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)),
-				resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"),
-				resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"),
-				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{
+				resource.TestCheckResourceAttr(resourceName, "rules.#", "3"),
+				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{
 					"id": "rule1",
@@ -141,7 +139,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test
 					"destination.#": "1",
 					"destination.0.storage_class": "STANDARD",
 				}),
-				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{
+				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{
 					"id": "rule2",
@@ -150,7 +148,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test
 					"destination.#": "1",
 					"destination.0.storage_class": "STANDARD_IA",
 				}),
-				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{
+				resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{
 					"id": "rule3",
@@ -176,7 +174,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t
 	rInt := acctest.RandInt()
 	alternateRegion := testAccGetAlternateRegion()
 	region := testAccGetRegion()
-	resourceName := "aws_s3_bucket.bucket"
+	resourceName := "aws_s3_bucket_replication_configuration.replication"
 
 	// record the initialized providers so that we can use them to check for the instances in each region
 	var providers []*schema.Provider
@@ -197,9 +195,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t
 				testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", 
testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -208,7 +205,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -218,7 +215,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -247,7 +244,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -267,9 +264,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -278,7 +274,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -306,7 +302,7 @@ func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -324,9 +320,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -358,9 +353,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -399,7 +393,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -417,9 +411,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -448,9 +441,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo 
Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -480,7 +472,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -512,35 +504,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { - rInt := acctest.RandInt() - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), - ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), - }, - }, - }) -} - // Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -578,7 +547,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -596,9 +565,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, 
"replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -626,9 +594,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -663,9 +630,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -701,9 +667,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -743,9 +708,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -790,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" rName := acctest.RandomWithPrefix("tf-acc-test") destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") @@ -805,9 +769,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -943,7 +906,7 @@ resource "aws_s3_bucket" "destination3" { } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn rules { @@ -984,7 +947,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } - } + } `, randInt)) } @@ -1131,7 +1094,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_kms_key" "replica" { provider = "awsalternate" description = "TF Acceptance Test S3 repl KMS key" @@ -1160,11 +1123,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1187,11 +1150,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + 
fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1210,11 +1173,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_kms_key" "replica" { @@ -1250,11 +1213,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1269,11 +1232,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1288,11 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1308,11 +1271,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1331,11 +1294,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1356,11 +1319,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1383,11 +1346,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func 
testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1413,11 +1376,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1440,5 +1403,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } From cabf7d24f3394e4e96bbe42131a22311650aba4e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 16:03:59 -0700 Subject: [PATCH 45/80] Guard against missing bucket or import id Ensure that the source bucket name is configured in the HCL Ensure that when importing the bucket name is passed in to the process as the import id value --- ...aws_s3_bucket_replication_configuration.go | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 5742d810d4b..1eb8c033488 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "net/http" @@ -165,7 +166,8 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta if v, ok := d.GetOk("bucket"); ok { bucket = v.(string) } else { - // fail, can't do anything without a bucket + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") } d.SetId(bucket) @@ -173,12 +175,24 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta } func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - input := &s3.HeadBucketInput{ - Bucket: aws.String(d.Get("bucket").(string)), + if _, ok := d.GetOk("bucket"); !ok { + // during import operations, use the supplied ID for the bucket name + d.Set("bucket", d.Id()) + } + + var bucket *string + input := &s3.HeadBucketInput{} + if rsp, ok := d.GetOk("bucket"); !ok { + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") + } else { + bucket = aws.String(rsp.(string)) + input.Bucket = bucket } + s3conn := meta.(*AWSClient).s3conn + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) @@ -215,14 +229,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) } - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } - // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: bucket, }) }) if err != nil && !isAWSErr(err, 
"ReplicationConfigurationNotFoundError", "") { From 0118c80dc9132ce8bbd96b875cc15cd944a83c2b Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 24 Aug 2021 16:11:06 -0700 Subject: [PATCH 46/80] Cleanout and relocate testing logic Relocate replication testing helper functions out of the s3 bucket tests and into the replication configuration testing file. Remove s3 bucket existance checks from replication testing per does not apply to the replication resource logic. --- ...3_bucket_replication_configuration_test.go | 290 ++++++++++-------- internal/service/s3/bucket_test.go | 64 ---- 2 files changed, 155 insertions(+), 199 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 4ac4e078867..e9bf0f3c449 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "reflect" + "sort" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -9,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -106,8 +110,6 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -125,10 +127,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -172,8 +170,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -191,10 +187,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, 
testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -242,8 +234,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -261,9 +251,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -299,7 +286,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -319,7 +305,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -352,7 +337,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -390,7 +374,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -410,7 +393,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -440,7 +422,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -470,8 +451,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -488,10 +467,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), + Check: resource.ComposeTestCheckFunc(), }, { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -504,47 +480,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 -func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { - rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: 
testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -564,10 +501,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -593,10 +528,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -629,10 +562,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -666,10 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), 
testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -707,10 +636,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -755,8 +682,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") - destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -766,12 +693,10 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExists(resourceName), testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -795,7 +720,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { ), }, { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, @@ -806,8 +731,71 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + for _, rule := range rules { + if dest := rule.Destination; dest != nil { + if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { + resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) + resourceReference = strings.Replace(resourceReference, "}", "", 1) + resourceReferenceParts := strings.Split(resourceReference, ".") + resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] + resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") + value := s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] + dest.Account = aws.String(value) + } + if ec := dest.EncryptionConfiguration; ec != nil { + if ec.ReplicaKmsKeyID != nil { + key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] + ec.ReplicaKmsKeyID = 
aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) + } + } + } + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + + conn := testAccProvider.Meta().(*AWSClient).s3conn + out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), + }) + if err != nil { + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } + if rules == nil { + return nil + } + return fmt.Errorf("GetReplicationConfiguration error: %v", err) + } + + for _, rule := range out.ReplicationConfiguration.Rules { + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { + return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + } + + return nil + } +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { - return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` + return fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_iam_role" "role" { @@ -853,8 +841,7 @@ resource "aws_s3_bucket" "source" { lifecycle { ignore_changes = [replication_configuration] } -} -`, randInt) +} `, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { @@ -873,8 +860,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "%[1]s" } } -} -`, storageClass) +} `, storageClass) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { @@ -948,8 +934,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1034,8 +1019,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1089,8 +1073,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1122,8 +1105,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1149,8 +1131,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1172,8 +1153,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1212,8 +1192,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { @@ -1231,8 +1210,7 @@ 
resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.destination.arn } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { @@ -1250,28 +1228,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` -} - -func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { @@ -1293,8 +1250,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { @@ -1318,8 +1274,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { @@ -1345,8 +1300,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { @@ -1375,8 +1329,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { @@ -1402,6 +1355,73 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } +} ` } -` + +func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Mon, 30 Aug 2021 10:19:46 -0700 Subject: [PATCH 47/80] Support Existing Object Replication Adding schema for ExistingObjectReplication configuration Adding read logic to identify ExistingObjectReplication configurations added to replication rules Adding update logic to include ExistingObjectReplicaiton configuration in the PutBucketReplicaiton input --- ...aws_s3_bucket_replication_configuration.go | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1eb8c033488..09a75e12c6f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -148,6 +148,21 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "existing_object_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + }, + }, + }, + }, "delete_marker_replication_status": { Type: schema.TypeString, Optional: true, @@ -248,7 +263,6 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met d.Set("role", 
aws.StringValue(r.Role)) } - // set rules, these need to be flattened rules := make([]interface{}, 0, len(r.Rules)) for _, v := range r.Rules { t := make(map[string]interface{}) @@ -277,6 +291,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met t["destination"] = []interface{}{rd} } + if v.ExistingObjectReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) + t["existing_object_replication"] = status + } + if v.ID != nil { t["id"] = aws.StringValue(v.ID) } @@ -354,6 +374,14 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.ID = aws.String(rrid.(string)) } + eor := rr["existing_object_replication"].([]interface{}) + if len(eor) > 0 { + s := eor[0].(map[string]interface{}) + rcRule.ExistingObjectReplication = &s3.ExistingObjectReplication{ + Status: aws.String(s["status"].(string)), + } + } + ruleDestination := &s3.Destination{} if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { if dest[0] != nil { From 60d212441b0a4acc173755d6ff9ddc5802cfe8fc Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 30 Aug 2021 10:23:04 -0700 Subject: [PATCH 48/80] Testing for ExistingObjectReplication In order for ExistingObjectReplication to work on s3 buckets, a request to AWS Technical Support needs to be made. Once they allow the configuration the test will operate as expected. --- ...3_bucket_replication_configuration_test.go | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e9bf0f3c449..31bf253c698 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -731,6 +731,68 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +const isExistingObjectReplicationBlocked = true + +func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { + if isExistingObjectReplicationBlocked { + /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. + Once that request is approved, this can be unblocked for testing. 
*/ + return + } + resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + ExistingObjectReplication: &s3.ExistingObjectReplication{ + Status: aws.String(s3.ExistingObjectReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] @@ -1425,3 +1487,76 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } `, rName, rNameDestination, rInt) } + +func testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Wed, 1 Sep 2021 08:39:13 -0700 Subject: [PATCH 49/80] Adding support for Replication Time Control new schema definition for "replication_time" along with update and read logic. 
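As a rough illustration of the intended shape (the resource references here are placeholders, not part of this change), the destination block gains a nested replication_time block:

  destination {
    bucket = aws_s3_bucket.destination.arn

    replication_time {
      status = "Enabled"
      time {
        minutes = 15
      }
    }
  }

The 15-minute window matches the value exercised in the acceptance tests added alongside this change.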
tracking upstream changes, adopt "waiter" module --- ...aws_s3_bucket_replication_configuration.go | 62 +++++++++++++++- ...3_bucket_replication_configuration_test.go | 72 +++++++++++++++++++ 2 files changed, 131 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 09a75e12c6f..4fd9a8ce7c7 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -91,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "replication_time": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + }, + "time": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -157,7 +188,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), }, }, @@ -208,7 +239,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met s3conn := meta.(*AWSClient).s3conn - err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -288,10 +319,20 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } rd["access_control_translation"] = []interface{}{rdt} } + if v.Destination.ReplicationTime != nil { + if v.Destination.ReplicationTime.Status != nil { + rd["replication_time"] = map[string]interface{}{ + "status": v.Destination.ReplicationTime.Status, + "time": map[string]interface{}{ + "minutes": v.Destination.ReplicationTime.Time.Minutes, + }, + } + } + } t["destination"] = []interface{}{rd} } - if v.ExistingObjectReplication.Status != nil { + if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) t["existing_object_replication"] = status @@ -408,6 +449,21 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) ruleDestination.AccessControlTranslation = ruleAclTranslation } + + rt, ok := bd["replication_time"].([]interface{}) + if ok && len(rt) > 0 { + s := rt[0].(map[string]interface{}) + if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { + m := 
t[0].(map[string]interface{}) + ruleDestination.ReplicationTime = &s3.ReplicationTime{ + Status: aws.String(s["status"].(string)), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 31bf253c698..7d44e94c166 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -448,6 +448,54 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }) } +func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + ReplicationTime: &s3.ReplicationTime{ + Status: aws.String(s3.ReplicationTimeStatusEnabled), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -925,6 +973,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } `, storageClass) } +func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } + } + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), From 6d2fdcd6bb123e8c38b74938434cd743ed9b39be Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 14:29:24 -0700 Subject: [PATCH 50/80] Adding Metrics support Metrics are a requirement for the Replication Time Control functionality. Adding it here. 
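As a rough sketch of a rule using Replication Time Control together with metrics (bucket and role references are placeholders), mirroring the shape the updated acceptance test exercises:

  rules {
    id     = "foobar"
    status = "Enabled"

    filter {
      prefix = "foo"
    }

    destination {
      bucket = aws_s3_bucket.destination.arn

      replication_time {
        status = "Enabled"
        time {
          minutes = 15
        }
      }

      metrics {
        status = "Enabled"
        event_threshold {
          minutes = 15
        }
      }
    }
  }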
Restructure the configuration read logic for Replication Time to be more correct and inline with expected data structures Update tests to reflect changes --- ...aws_s3_bucket_replication_configuration.go | 66 +++++++++++++++++-- ...3_bucket_replication_configuration_test.go | 27 ++++++-- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4fd9a8ce7c7..1cf97c4b1d8 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -92,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "metrics": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + }, + "event_threshold": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, "replication_time": { Type: schema.TypeList, Optional: true, @@ -320,13 +350,23 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met rd["access_control_translation"] = []interface{}{rdt} } if v.Destination.ReplicationTime != nil { + drt := make(map[string]interface{}) if v.Destination.ReplicationTime.Status != nil { - rd["replication_time"] = map[string]interface{}{ - "status": v.Destination.ReplicationTime.Status, - "time": map[string]interface{}{ - "minutes": v.Destination.ReplicationTime.Time.Minutes, - }, - } + drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status) + drtm := make(map[string]interface{}) + drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes) + drt["time"] = []interface{}{drtm} + rd["replication_time"] = []interface{}{drt} + } + } + if v.Destination.Metrics != nil { + dm := make(map[string]interface{}) + if v.Destination.Metrics.Status != nil { + dm["status"] = aws.StringValue(v.Destination.Metrics.Status) + dmetm := make(map[string]interface{}) + dmetm["minutes"] = aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes) + dm["event_threshold"] = []interface{}{dmetm} + rd["metrics"] = []interface{}{dm} } } t["destination"] = []interface{}{rd} @@ -464,6 +504,20 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } } + rm, ok := bd["metrics"].([]interface{}) + if ok && len(rm) > 0 { + s := rm[0].(map[string]interface{}) + if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 { + m := et[0].(map[string]interface{}) + ruleDestination.Metrics = &s3.Metrics{ + Status: aws.String(s["status"].(string)), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 7d44e94c166..70ff1e4cea9 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -475,7 +475,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { 
resourceName, []*s3.ReplicationRule{ { - ID: aws.String("foobar"), + ID: aws.String("foobar"), + Priority: aws.Int64(0), Destination: &s3.Destination{ Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), ReplicationTime: &s3.ReplicationTime{ @@ -484,8 +485,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { Minutes: aws.Int64(15), }, }, + Metrics: &s3.Metrics{ + Status: aws.String(s3.MetricsStatusEnabled), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), }, - Prefix: aws.String("foo"), Status: aws.String(s3.ReplicationRuleStatusEnabled), }, }, @@ -981,9 +993,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { rules { id = "foobar" - prefix = "foo" + filter { + prefix = "foo" + } status = "Enabled" - destination { bucket = aws_s3_bucket.destination.arn replication_time { @@ -992,6 +1005,12 @@ resource "aws_s3_bucket_replication_configuration" "replication" { minutes = 15 } } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } + } } } }` From 8d2fa83cc01c7dad0c8bb3fd7b67cd56c1ed4a62 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 16:14:24 -0700 Subject: [PATCH 51/80] Adding Replica Modifications support, with tests Update the the source_selection_criteria configuration to include the replica_modificaions. Refactored sse_kms_encrypted_objects schema to map closer to the actual AWS SDK structure. --- ...aws_s3_bucket_replication_configuration.go | 44 ++++++--- ...3_bucket_replication_configuration_test.go | 99 +++++++++++++++---- 2 files changed, 109 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1cf97c4b1d8..8cfffba4044 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -159,7 +159,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "sse_kms_encrypted_objects": { @@ -169,9 +169,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + }, + }, + }, + }, + "replica_modifications": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), }, }, }, @@ -391,11 +407,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met tssc := make(map[string]interface{}) if vssc.SseKmsEncryptedObjects != nil { tSseKms := make(map[string]interface{}) - if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { - tSseKms["enabled"] = true - } else if 
aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { - tSseKms["enabled"] = false - } + tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} } t["source_selection_criteria"] = []interface{}{tssc} @@ -530,14 +542,18 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m if sseKms[0] != nil { sseKmsValues := sseKms[0].(map[string]interface{}) sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - if sseKmsValues["enabled"].(bool) { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) - } else { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) - } + sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects } } + if sscRm, ok := sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { + if sscRm[0] != nil { + replicaModValues := sscRm[0].(map[string]interface{}) + replicaModifications := &s3.ReplicaModifications{} + replicaModifications.Status = aws.String(replicaModValues["status"].(string)) + ruleSsc.ReplicaModifications = replicaModifications + } + } rcRule.SourceSelectionCriteria = ruleSsc } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 70ff1e4cea9..acb0374ab3f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -508,6 +508,59 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }) } +func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Priority: aws.Int64(0), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + ReplicaModifications: &s3.ReplicaModifications{ + Status: aws.String(s3.ReplicaModificationsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: 
https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -1016,6 +1069,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } +func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" + } + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), @@ -1254,7 +1331,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1341,7 +1418,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1366,24 +1443,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } ` } -func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} ` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { From 7d246e558185dafed89ae6c3e5b4faaa9755518e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 14:42:41 -0700 Subject: [PATCH 52/80] terrafmt --- ...3_bucket_replication_configuration_test.go | 570 +++++++++--------- 1 file changed, 285 insertions(+), 285 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index acb0374ab3f..522c3389979 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1002,94 +1002,94 @@ resource "aws_s3_bucket" "destination" { } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket" "source" { - bucket = "tf-test-bucket-source-%[1]d" + bucket = "tf-test-bucket-source-%[1]d" versioning { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } -} `, randInt) +}`, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + 
prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "%[1]s" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "%[1]s" } -} `, storageClass) + } +}`, storageClass) } func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" + rules { + id = "foobar" + filter { + prefix = "foo" + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - replication_time { - status = "Enabled" - time { - minutes = 15 - } - } - metrics { - status = "Enabled" - event_threshold { - minutes = 15 - } - } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } } } + } }` } func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" - } - source_selection_criteria { - replica_modifications { - status = "Enabled" - } - } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" } } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } }` } @@ -1105,7 +1105,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1117,54 +1117,54 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } - -} `, randInt)) + } + +}`, 
randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1179,7 +1179,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1191,65 +1191,65 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter { - prefix = "prefix3" + filter { + prefix = "prefix3" - tags = { - Key3 = "Value3" - } + tags = { + Key3 = "Value3" } + } - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1264,46 +1264,46 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1316,26 +1316,26 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" 
+ rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1344,24 +1344,24 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1370,20 +1370,20 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1398,176 +1398,176 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role 
= aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - } + destination { + bucket = aws_s3_bucket.destination.arn } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - delete_marker_replication_status = "Enabled" + delete_marker_replication_status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 42 + priority = 42 - filter { - tags = { - ReplicateMe = "Yes" - } + filter { + tags = { + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 41 + priority = 41 - filter { - prefix = "foo" + filter { + prefix = "foo" - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } + tags = { + AnotherTag = "OK" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = 
aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } + filter { + tags = { + AnotherTag = "OK" + Foo = "Bar" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { From c991dffd86044045146cf618425957279401764d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 15:46:23 -0700 Subject: [PATCH 53/80] terrafmt --- ...3_bucket_replication_configuration_test.go | 89 ++++++++++--------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 522c3389979..012e5f5bb75 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1573,7 +1573,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { - name = %[1]q + name = "%[1]s" assume_role_policy = < Date: Fri, 3 Sep 2021 15:50:53 -0700 Subject: [PATCH 54/80] Initial documentation for new resource Adding documentation page for the new independent resource. Initialized with content copied over from the s3_bucket.html.markdown page. --- ...et_replication_configuration.html.markdown | 190 ++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 website/docs/r/s3_bucket_replication_configuration.html.markdown diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown new file mode 100644 index 00000000000..9333835f4d5 --- /dev/null +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -0,0 +1,190 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_bucket_replication_configuration" +description: |- + Provides a S3 bucket replication configuration resource. +--- + +# Resource: aws_s3_bucket_replication_configuration + +Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. + +## Example Usage + +### Using replication configuration + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = < **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. +Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. 
To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `destination` - (Required) Specifies the destination for the rule (documented below). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. +* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + +The `destination` object supports the following: + +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + `sse_kms_encrypted_objects` source selection criteria. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. + +The `source_selection_criteria` object supports the following: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. + +The `sse_kms_encrypted_objects` object supports the following: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + +The `filter` object supports the following: + +* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. +The rule applies only to objects having all the tags in its tagset. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +## Import + +S3 bucket replication configuration can be imported using the `bucket`, e.g. 
+ +``` +$ terraform import aws_s3_bucket_replication_configuration.replication bucket-name +``` From ed53680190b43aad124dc5c62ae78a594dc2f3f9 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 9 Sep 2021 14:26:09 -0700 Subject: [PATCH 55/80] adding new feature documentation --- website/docs/r/s3_bucket.html.markdown | 14 +++++++ ...et_replication_configuration.html.markdown | 40 ++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index bdce5f7f38c..6d243a9dccf 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -301,6 +301,10 @@ resource "aws_s3_bucket" "source" { } ``` +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. + + + ### Enable Default Server Side Encryption ```terraform @@ -436,6 +440,16 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: +~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9333835f4d5..9870635e069 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -107,6 +107,11 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { + ignore_changes = [ + replication_configuration + ] + } } aws_s3_bucket_replication_configuration replication { @@ -126,6 +131,17 @@ aws_s3_bucket_replication_configuration replication { ``` +~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + + ## Argument Reference The following arguments are supported: @@ -142,6 +158,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). * `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). 
* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). @@ -153,6 +170,10 @@ Replication configuration V1 supports filtering based on only the `prefix` attri ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +The `existing_object_replication` object supports the following: + +* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -161,15 +182,32 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). +* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). + +The `replication_time` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + +The `metrics` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. The `source_selection_criteria` object supports the following: +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between + replicas and source objects (documented below). + * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` in `destination` must be specified as well. +The `replica_modifications` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `sse_kms_encrypted_objects` object supports the following: -* `enabled` - (Required) Boolean which indicates if this criteria is enabled. +* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
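For illustration, a rule that replicates only SSE-KMS encrypted objects could be sketched roughly as follows. The `aws_s3_bucket.source`, `aws_s3_bucket.destination`, `aws_iam_role.replication`, and `aws_kms_key.replica` references are placeholders assumed to be defined elsewhere in the configuration.

```terraform
resource "aws_s3_bucket_replication_configuration" "replication" {
  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.replication.arn

  rules {
    id     = "kms-objects"
    prefix = "encrypted/"
    status = "Enabled"

    source_selection_criteria {
      sse_kms_encrypted_objects {
        status = "Enabled"
      }
    }

    destination {
      # replica_kms_key_id must be set when sse_kms_encrypted_objects is enabled
      bucket             = aws_s3_bucket.destination.arn
      storage_class      = "STANDARD"
      replica_kms_key_id = aws_kms_key.replica.arn
    }
  }
}
```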
The `filter` object supports the following: From e4b87afcb83771ab5a636ac9cca76d9f75eb7515 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:09 -0700 Subject: [PATCH 56/80] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 4 ++-- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 012e5f5bb75..e550fcad46d 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,11 +844,11 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = true +const isExistingObjectReplicationBlocked = false func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { - /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. Once that request is approved, this can be unblocked for testing. */ return diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9870635e069..2511d95454b 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -172,6 +172,8 @@ Replication configuration V1 supports filtering based on only the `prefix` attri The `existing_object_replication` object supports the following: +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) + * `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
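As a minimal sketch, a rule that opts in to existing object replication could look like the following, assuming the AWS account has already been approved for the feature and the referenced destination bucket exists elsewhere in the configuration.

```terraform
rules {
  id     = "existing-objects"
  status = "Enabled"

  existing_object_replication {
    # Requires prior activation by AWS Support
    status = "Enabled"
  }

  destination {
    bucket = aws_s3_bucket.destination.arn
  }
}
```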
The `destination` object supports the following: From 8a024bd3394bc9b9d7e2bfee9a571383d3bb5113 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:56 -0700 Subject: [PATCH 57/80] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e550fcad46d..fed215ad3c1 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,7 +844,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = false +const isExistingObjectReplicationBlocked = true func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { From 64d2ae8d6168c103f89dd4036d2a29ae783cc119 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 13 Sep 2021 15:01:45 -0700 Subject: [PATCH 58/80] adding headers and source examples to documentation --- ...et_replication_configuration.html.markdown | 162 +++++++++++++++--- 1 file changed, 139 insertions(+), 23 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 2511d95454b..091286c63ab 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -131,6 +131,76 @@ aws_s3_bucket_replication_configuration replication { ``` +### Bi-Directional Replication + +``` + +... + +resource "aws_s3_bucket" "east" { + bucket = "tf-test-bucket-east-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +resource "aws_s3_bucket" "west" { + provider = west + bucket = "tf-test-bucket-west-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +aws_s3_bucket_replication_configuration "east_to_west" { + role = aws_iam_role.east_replication.arn + bucket = aws_s3_bucket.east.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.west.arn + storage_class = "STANDARD" + } + } +} + +aws_s3_bucket_replication_configuration "west_to_east" { + role = aws_iam_role.west_replication.arn + bucket = aws_s3_bucket.west.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.east.arn + storage_class = "STANDARD" + } + } +} +``` + +## Usage Notes + +This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. + ~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. 
``` @@ -140,11 +210,17 @@ lifecycle { ] } ``` +The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +* `replica_modifications` - Added to the `source_selection_criteria` configuration +* `metrics` - Added to the `destination` configuration +* `replication_time` - Added to the `destination` configuration +* `existing_object_replication` - Added to the replication rule + +Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference -The following arguments are supported: +## Argument Reference The `replication_configuration` object supports the following: @@ -152,30 +228,42 @@ The `replication_configuration` object supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). -The `rules` object supports the following: +### Rules + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. + +The `rules` object supports the following: + With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). -* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). -* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. 
+* `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +### Rule Existing Object Replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +``` +existing_object_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -187,29 +275,61 @@ The `destination` object supports the following: * `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). * `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +### Replication Time Control + +``` +replication_time { + status = "Enabled" + time { + minutes = 15 + } +} +``` + The `replication_time` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. + +### Metrics + +``` +metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } +} +``` The `metrics` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. + +### Source Selection Criteria The `source_selection_criteria` object supports the following: +``` +source_selection_criteria { + replica_modification { + status = "Enabled" + } + sse_kms_encrypted_objects { + status = "Enabled" + } +} +``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects (documented below). - -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). 
If specified, `replica_kms_key_id` - in `destination` must be specified as well. - -The `replica_modifications` object supports the following: + replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -The `sse_kms_encrypted_objects` object supports the following: + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Replication Rule Filter The `filter` object supports the following: @@ -217,10 +337,6 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 54585b3371adff762db6df0d8cf3370d68bef99c Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 14 Sep 2021 10:30:09 -0700 Subject: [PATCH 59/80] adding internal documentation links, cleanup --- ...et_replication_configuration.html.markdown | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 091286c63ab..099b3261ab7 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -8,7 +8,7 @@ description: |- # Resource: aws_s3_bucket_replication_configuration -Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. +Provides an independent configuration resource for S3 bucket [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). ## Example Usage @@ -199,9 +199,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
-~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. +~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. ``` lifecycle { @@ -210,25 +210,25 @@ lifecycle { ] } ``` -The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: -* `replica_modifications` - Added to the `source_selection_criteria` configuration -* `metrics` - Added to the `destination` configuration -* `replication_time` - Added to the `destination` configuration -* `existing_object_replication` - Added to the replication rule +* `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) +* `metrics` - Added to the `destination` configuration object [documented below](#metrics) +* `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) +* `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) ## Argument Reference -The `replication_configuration` object supports the following: +The `replication_configuration` resource supports the following: -* `bucket` - (Required) The ARN of the source S3 bucket where you want Amazon S3 to monitor. +* `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication (documented below). +* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### Rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -236,20 +236,19 @@ The `replication_configuration` object supports the following: The `rules` object supports the following: -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. -Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). 
* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). -* `destination` - (Required) Specifies the destination for the rule (documented below). -* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `destination` - (Required) Specifies the destination for the rule [documented below](#destination). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. -* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### Rule Existing Object Replication +### exiting_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) @@ -263,7 +262,7 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -### Destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -272,10 +271,10 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). -* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). +* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). -### Replication Time Control +### replication_time ``` replication_time { @@ -291,7 +290,7 @@ The `replication_time` object supports the following: * `status` - (Required) The status of the Replication Time Control. 
Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. -### Metrics +### metrics ``` metrics { @@ -307,7 +306,7 @@ The `metrics` object supports the following: * `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. -### Source Selection Criteria +### source_selection_criteria The `source_selection_criteria` object supports the following: ``` @@ -322,14 +321,14 @@ source_selection_criteria { ``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. 
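For comparison, a rough sketch of the two formats: the inline `replication_configuration` block of `aws_s3_bucket` expects an `enabled` boolean, while this resource expects a `status` string.

```terraform
# Inline replication_configuration block of aws_s3_bucket
sse_kms_encrypted_objects {
  enabled = true
}

# aws_s3_bucket_replication_configuration resource
sse_kms_encrypted_objects {
  status = "Enabled"
}
```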
-### Replication Rule Filter +### filter The `filter` object supports the following: From 80b6e26a05b55e68ca8336313e422a65b71a0b15 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 15 Sep 2021 16:34:21 -0700 Subject: [PATCH 60/80] Align delete_marker_replication with other objects --- ...aws_s3_bucket_replication_configuration.go | 50 +++++++++++-------- ...3_bucket_replication_configuration_test.go | 12 +++-- internal/service/s3/bucket.go | 2 +- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 8cfffba4044..581ac90653f 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -96,13 +96,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, "event_threshold": { Type: schema.TypeList, @@ -126,13 +126,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, "time": { Type: schema.TypeList, @@ -172,7 +172,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), }, }, }, @@ -187,7 +187,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), }, }, }, @@ -235,15 +235,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), }, }, }, }, - "delete_marker_replication_status": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, }, }, @@ -391,7 +401,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = 
aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = status + t["existing_object_replication"] = []interface{}{status} } if v.ID != nil { @@ -431,8 +441,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } t["filter"] = []interface{}{m} - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { - t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + t["delete_marker_replication"] = []interface{}{status} } } @@ -573,13 +585,11 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) } - if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(dmr), - } - } else { + dmr, ok := rr["delete_marker_replication"].([]interface{}) + if ok && len(dmr) > 0 { + s := dmr[0].(map[string]interface{}) rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s["status"].(string)), } } } else { diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index fed215ad3c1..c5c6879cf80 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1479,7 +1479,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1628,7 +1630,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "testprefix" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1701,7 +1705,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 5bc4db22b41..b52ccb998da 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2662,7 +2662,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From d198d93187e7a7c4f0d12e2042fb0b2da9e7cf9f Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 08:31:08 -0700 Subject: [PATCH 61/80] Update delete_marker replication docs to reflect changes --- ...ket_replication_configuration.html.markdown | 18 +++++++++++++++++- 1 
file changed, 17 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 099b3261ab7..44d46f1c4d0 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -239,7 +239,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). -* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication). * `destination` - (Required) Specifies the destination for the rule [documented below](#destination). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. @@ -262,6 +262,22 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +### delete_marker_replication + +~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. + +~> **NOTE:** This argument is only available with V2 replication configurations. + +The `delete_marker_replication` object supports the following: + +``` +delete_marker_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + + ### destination The `destination` object supports the following: From e5d3d0625a10dee1a815fae3ddef7c9c6ed13cfe Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 13:59:37 -0700 Subject: [PATCH 62/80] Documentation adjustments fix typos shift notes to be above examples remove unnecssary words expand on some attribute concepts that maybe obscure --- website/docs/r/s3_bucket.html.markdown | 6 +-- ...et_replication_configuration.html.markdown | 51 ++++++++++--------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 6d243a9dccf..57bb495ebd7 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -178,6 +178,8 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using replication configuration +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. 
+ ```terraform provider "aws" { region = "eu-west-1" @@ -301,10 +303,6 @@ resource "aws_s3_bucket" "source" { } ``` -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. - - - ### Enable Default Server Side Encryption ```terraform diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 44d46f1c4d0..de512bcb34d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,9 +133,9 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -``` +```terraform -... +#... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -199,17 +199,18 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. - ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -``` +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. + +```terraform lifecycle { ignore_changes = [ replication_configuration ] } ``` + The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) @@ -248,51 +249,53 @@ With the `filter` attribute, you can specify object filters based on the object * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### exiting_object_replication +### existing_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -``` +```terraform existing_object_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ### delete_marker_replication -~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. 
+~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. ~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: -``` +```terraform delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. ### destination The `destination` object supports the following: -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. * `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. * `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). * `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). ### replication_time -``` +```terraform replication_time { status = "Enabled" time { @@ -303,12 +306,12 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. 
### metrics -``` +```terraform metrics { status = "Enabled" event_threshold { @@ -319,13 +322,14 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria The `source_selection_criteria` object supports the following: -``` + +```terraform source_selection_criteria { replica_modification { status = "Enabled" @@ -336,13 +340,14 @@ source_selection_criteria { } ``` + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. + * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. ### filter From 6c15db52797a9b7561a1448855be36a6727a8345 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:15:32 -0700 Subject: [PATCH 63/80] linting --- ...s3_bucket_replication_configuration.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index de512bcb34d..ec38b8c4d2d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -229,7 +229,7 @@ The `replication_configuration` resource supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -268,7 +268,7 @@ existing_object_replication { ~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -~> **NOTE:** This argument is only available with V2 replication configurations. +~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: @@ -281,7 +281,7 @@ delete_marker_replication { * `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. 
-### destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. @@ -306,7 +306,7 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. ### metrics @@ -322,7 +322,7 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria @@ -342,7 +342,7 @@ source_selection_criteria { ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` From 1a887de980d1619b9eedb1a1421ce2c2866b2392 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:41:01 -0700 Subject: [PATCH 64/80] linting/fmt --- aws/resource_aws_s3_bucket_replication_configuration.go | 2 +- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 581ac90653f..9fed71889cf 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -347,7 +347,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met r := replication.ReplicationConfiguration // set role if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", aws.StringValue(r.Role)) + d.Set("role", r.Role) } rules := make([]interface{}, 0, len(r.Rules)) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ec38b8c4d2d..c119ce2bcd0 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. 
See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference +## Attributes Reference The `replication_configuration` resource supports the following: From 01bd0b247d9bca626059290905ae1901784ae552 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 15:00:32 -0700 Subject: [PATCH 65/80] adding missing attribute reference to documentation --- .../r/s3_bucket_replication_configuration.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index c119ce2bcd0..eea1a86a844 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Attributes Reference +## Argument Reference The `replication_configuration` resource supports the following: @@ -357,6 +357,12 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* id - Resource id is the s3 source bucket name. + ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 0d92fba850a5f7e39f01977ae8e77540eee0d800 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 23 Sep 2021 07:46:36 -0700 Subject: [PATCH 66/80] use untyped code blocks until new resource is merged to validate --- ...ucket_replication_configuration.html.markdown | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844..ee5ce245b7b 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -```terraform +``` provider "aws" { region = "eu-west-1" } @@ -133,7 +133,7 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -```terraform +``` #... @@ -203,7 +203,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
-```terraform +``` lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +255,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +``` existing_object_replication { status = "Enabled" } @@ -272,7 +272,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +``` delete_marker_replication { status = "Enabled" } @@ -295,7 +295,7 @@ The `destination` object supports the following: ### replication_time -```terraform +``` replication_time { status = "Enabled" time { @@ -311,7 +311,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +``` metrics { status = "Enabled" event_threshold { @@ -329,7 +329,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +``` source_selection_criteria { replica_modification { status = "Enabled" From e4778325052a61e60a52adb0a6d78339d7931dab Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:49:58 -0400 Subject: [PATCH 67/80] address linter-related errors --- ...3_bucket_replication_configuration_test.go | 14 ++++++-------- ...et_replication_configuration.html.markdown | 19 +++++++++---------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c5c6879cf80..d8274ed6cf2 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1480,8 +1480,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1631,8 +1631,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1706,8 +1706,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1715,7 +1715,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } - - `, rName, rNameDestination, rInt) } diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ee5ce245b7b..90e3faf8de3 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -114,9 +114,9 @@ resource "aws_s3_bucket" "source" { } } -aws_s3_bucket_replication_configuration replication { +resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id rules { id = "foobar" prefix = "foo" @@ -128,14 +128,13 @@ aws_s3_bucket_replication_configuration replication { } } } - ``` ### Bi-Directional Replication ``` -#... +# ... other configuration ... 
resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -166,9 +165,9 @@ resource "aws_s3_bucket" "west" { } } -aws_s3_bucket_replication_configuration "east_to_west" { +resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn - bucket = aws_s3_bucket.east.id + bucket = aws_s3_bucket.east.id rules { id = "foobar" prefix = "foo" @@ -181,9 +180,9 @@ aws_s3_bucket_replication_configuration "east_to_west" { } } -aws_s3_bucket_replication_configuration "west_to_east" { +resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn - bucket = aws_s3_bucket.west.id + bucket = aws_s3_bucket.west.id rules { id = "foobar" prefix = "foo" @@ -201,7 +200,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. ``` lifecycle { @@ -357,7 +356,7 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attribute Reference +## Attributes Reference In addition to all arguments above, the following attributes are exported: From 3622ba5120cc8df8b99d260db6cc23de5b3b53f2 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:52:12 -0400 Subject: [PATCH 68/80] Update CHANGELOG for #20777 --- .changelog/20777.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/20777.txt diff --git a/.changelog/20777.txt b/.changelog/20777.txt new file mode 100644 index 00000000000..75e556fa77e --- /dev/null +++ b/.changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_replication_configuration +``` From a241e956e5aa05eed45fe9de8efce9f873c1cafe Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 10:17:16 -0400 Subject: [PATCH 69/80] forgo syntax highlighting in short snippet code blocks in documentation --- .../docs/r/s3_bucket_replication_configuration.html.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 90e3faf8de3..8de6f01c14e 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,7 +133,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication ``` - # ... other configuration ... 
resource "aws_s3_bucket" "east" { @@ -366,6 +365,6 @@ In addition to all arguments above, the following attributes are exported: S3 bucket replication configuration can be imported using the `bucket`, e.g. -``` +```sh $ terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` From b8be20a184a2f110778eae6defcfbd85a9db6f68 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 24 Sep 2021 10:40:10 -0700 Subject: [PATCH 70/80] Revert key renamed in error --- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index b52ccb998da..5bc4db22b41 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2662,7 +2662,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From 9614e741543169ada01f7d20f3be960eecd9b362 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:36:06 -0700 Subject: [PATCH 71/80] Add logic for explicit delete Include delete logic for replication configuration Adding test for delete logic --- ...aws_s3_bucket_replication_configuration.go | 13 ++++ ...3_bucket_replication_configuration_test.go | 62 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 9fed71889cf..42cbce8d6ef 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -628,6 +628,19 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + + dbri := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(bucket), + } + + _, err := s3conn.DeleteBucketReplication(dbri) + if err != nil { + return fmt.Errorf("Error removing S3 bucket replication: %s", err) + } return nil } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index d8274ed6cf2..ff518137550 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -906,6 +906,68 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) }) } +func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + testDeleted := func(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[r] + if ok { + return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) + } + return nil + } + } + + // record the initialized providers so that we can use them to check for the instances 
in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSS3BucketReplicationConfigBasic(rInt), + Check: resource.ComposeTestCheckFunc(testDeleted(resourceName)), + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] From 821a2b4e9009e6a9ee48d5db6a28d813014fb018 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 2 Nov 2021 09:41:45 -0700 Subject: [PATCH 72/80] move source into service/s3 dir tracking upstream changes --- .../service/s3/bucket_replication_configuration.go | 0 .../service/s3/bucket_replication_configuration_test.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename aws/resource_aws_s3_bucket_replication_configuration.go => internal/service/s3/bucket_replication_configuration.go (100%) rename aws/resource_aws_s3_bucket_replication_configuration_test.go => internal/service/s3/bucket_replication_configuration_test.go (100%) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go similarity index 100% rename from aws/resource_aws_s3_bucket_replication_configuration.go rename to internal/service/s3/bucket_replication_configuration.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go similarity index 100% rename from aws/resource_aws_s3_bucket_replication_configuration_test.go rename to internal/service/s3/bucket_replication_configuration_test.go From a367de4692b7f8435dfe7a112d40ecd6a0779587 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 2 Nov 2021 09:51:18 -0700 Subject: [PATCH 73/80] tracking up stream changes --- internal/provider/provider.go | 21 ++++++++++--------- .../s3/bucket_replication_configuration.go | 4 ++-- .../bucket_replication_configuration_test.go | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 7471764fb8f..c1ea5c8d337 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1459,16 +1459,17 @@ func Provider() *schema.Provider { "aws_route53_resolver_rule": route53resolver.ResourceRule(), "aws_route53_resolver_rule_association": route53resolver.ResourceRuleAssociation(), - "aws_s3_bucket": 
s3.ResourceBucket(), - "aws_s3_bucket_analytics_configuration": s3.ResourceBucketAnalyticsConfiguration(), - "aws_s3_bucket_inventory": s3.ResourceBucketInventory(), - "aws_s3_bucket_metric": s3.ResourceBucketMetric(), - "aws_s3_bucket_notification": s3.ResourceBucketNotification(), - "aws_s3_bucket_object": s3.ResourceBucketObject(), - "aws_s3_bucket_ownership_controls": s3.ResourceBucketOwnershipControls(), - "aws_s3_bucket_policy": s3.ResourceBucketPolicy(), - "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), - "aws_s3_object_copy": s3.ResourceObjectCopy(), + "aws_s3_bucket": s3.ResourceBucket(), + "aws_s3_bucket_analytics_configuration": s3.ResourceBucketAnalyticsConfiguration(), + "aws_s3_bucket_inventory": s3.ResourceBucketInventory(), + "aws_s3_bucket_metric": s3.ResourceBucketMetric(), + "aws_s3_bucket_notification": s3.ResourceBucketNotification(), + "aws_s3_bucket_object": s3.ResourceBucketObject(), + "aws_s3_bucket_ownership_controls": s3.ResourceBucketOwnershipControls(), + "aws_s3_bucket_policy": s3.ResourceBucketPolicy(), + "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), + "aws_s3_bucket_replication_configuration": s3.ResourceBucketReplicationConfiguration(), + "aws_s3_object_copy": s3.ResourceObjectCopy(), "aws_s3_access_point": s3control.ResourceAccessPoint(), "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 42cbce8d6ef..4c693709341 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -1,4 +1,4 @@ -package aws +package s3 import ( "errors" @@ -18,7 +18,7 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) -func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { +func ResourceBucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ Create: resourceAwsS3BucketReplicationConfigurationPut, Read: resourceAwsS3BucketReplicationConfigurationRead, diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index ff518137550..28bc9f8e502 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1,4 +1,4 @@ -package aws +package s3 import ( "fmt" From 37352d0d2cfaa4608c35610a201d0df9aa93ec79 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 3 Nov 2021 11:24:11 -0700 Subject: [PATCH 74/80] tracking upstream changes --- aws/provider.go | 1702 ----------------- .../s3/bucket_replication_configuration.go | 6 +- 2 files changed, 3 insertions(+), 1705 deletions(-) delete mode 100644 aws/provider.go diff --git a/aws/provider.go b/aws/provider.go deleted file mode 100644 index 5641b36adb4..00000000000 --- a/aws/provider.go +++ /dev/null @@ -1,1702 +0,0 @@ -package aws - -import ( - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/mutexkv" -) - -// Provider returns a *schema.Provider. 
-func Provider() *schema.Provider { - // TODO: Move the validation to this, requires conditional schemas - // TODO: Move the configuration to this, requires validation - - // The actual provider - provider := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["access_key"], - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["secret_key"], - }, - - "profile": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["profile"], - }, - - "assume_role": assumeRoleSchema(), - - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["shared_credentials_file"], - }, - - "token": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["token"], - }, - - "region": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", - }, nil), - Description: descriptions["region"], - InputDefault: "us-east-1", // lintignore:AWSAT003 - }, - - "max_retries": { - Type: schema.TypeInt, - Optional: true, - Default: 25, - Description: descriptions["max_retries"], - }, - - "allowed_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"forbidden_account_ids"}, - Set: schema.HashString, - }, - - "forbidden_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"allowed_account_ids"}, - Set: schema.HashString, - }, - - "default_tags": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Configuration block with settings to default resource tags across all resources.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Resource tags to default across all resources", - }, - }, - }, - }, - - "endpoints": endpointsSchema(), - - "ignore_tags": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Configuration block with settings to ignore resource tags across all resources.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "keys": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Description: "Resource tag keys to ignore across all resources.", - }, - "key_prefixes": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Description: "Resource tag key prefixes to ignore across all resources.", - }, - }, - }, - }, - - "insecure": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["insecure"], - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_credentials_validation"], - }, - - "skip_get_ec2_platforms": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_get_ec2_platforms"], - }, - - "skip_region_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_region_validation"], - }, - - "skip_requesting_account_id": { - Type: schema.TypeBool, - Optional: 
true, - Default: false, - Description: descriptions["skip_requesting_account_id"], - }, - - "skip_metadata_api_check": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_metadata_api_check"], - }, - - "s3_force_path_style": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["s3_force_path_style"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "aws_acm_certificate": dataSourceAwsAcmCertificate(), - "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), - "aws_acmpca_certificate": dataSourceAwsAcmpcaCertificate(), - "aws_ami": dataSourceAwsAmi(), - "aws_ami_ids": dataSourceAwsAmiIds(), - "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), - "aws_api_gateway_domain_name": dataSourceAwsApiGatewayDomainName(), - "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), - "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), - "aws_apigatewayv2_api": dataSourceAwsApiGatewayV2Api(), - "aws_apigatewayv2_apis": dataSourceAwsApiGatewayV2Apis(), - "aws_appmesh_mesh": dataSourceAwsAppmeshMesh(), - "aws_appmesh_virtual_service": dataSourceAwsAppmeshVirtualService(), - "aws_arn": dataSourceAwsArn(), - "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), - "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), - "aws_availability_zone": dataSourceAwsAvailabilityZone(), - "aws_availability_zones": dataSourceAwsAvailabilityZones(), - "aws_backup_plan": dataSourceAwsBackupPlan(), - "aws_backup_selection": dataSourceAwsBackupSelection(), - "aws_backup_vault": dataSourceAwsBackupVault(), - "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), - "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), - "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), - "aws_caller_identity": dataSourceAwsCallerIdentity(), - "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), - "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), - "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), - "aws_cloudformation_type": dataSourceAwsCloudFormationType(), - "aws_cloudfront_cache_policy": dataSourceAwsCloudFrontCachePolicy(), - "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), - "aws_cloudfront_function": dataSourceAwsCloudFrontFunction(), - "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), - "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), - "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), - "aws_cloudwatch_event_connection": dataSourceAwsCloudwatchEventConnection(), - "aws_cloudwatch_event_source": dataSourceAwsCloudWatchEventSource(), - "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), - "aws_codeartifact_authorization_token": dataSourceAwsCodeArtifactAuthorizationToken(), - "aws_codeartifact_repository_endpoint": dataSourceAwsCodeArtifactRepositoryEndpoint(), - "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), - "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), - "aws_codestarconnections_connection": dataSourceAwsCodeStarConnectionsConnection(), - "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), - "aws_default_tags": dataSourceAwsDefaultTags(), - "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), - "aws_db_event_categories": dataSourceAwsDbEventCategories(), - "aws_db_instance": 
dataSourceAwsDbInstance(), - "aws_db_snapshot": dataSourceAwsDbSnapshot(), - "aws_db_subnet_group": dataSourceAwsDbSubnetGroup(), - "aws_directory_service_directory": dataSourceAwsDirectoryServiceDirectory(), - "aws_docdb_engine_version": dataSourceAwsDocdbEngineVersion(), - "aws_docdb_orderable_db_instance": dataSourceAwsDocdbOrderableDbInstance(), - "aws_dx_gateway": dataSourceAwsDxGateway(), - "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), - "aws_ebs_default_kms_key": dataSourceAwsEbsDefaultKmsKey(), - "aws_ebs_encryption_by_default": dataSourceAwsEbsEncryptionByDefault(), - "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), - "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), - "aws_ebs_volume": dataSourceAwsEbsVolume(), - "aws_ebs_volumes": dataSourceAwsEbsVolumes(), - "aws_ec2_coip_pool": dataSourceAwsEc2CoipPool(), - "aws_ec2_coip_pools": dataSourceAwsEc2CoipPools(), - "aws_ec2_instance_type": dataSourceAwsEc2InstanceType(), - "aws_ec2_instance_type_offering": dataSourceAwsEc2InstanceTypeOffering(), - "aws_ec2_instance_type_offerings": dataSourceAwsEc2InstanceTypeOfferings(), - "aws_ec2_local_gateway": dataSourceAwsEc2LocalGateway(), - "aws_ec2_local_gateways": dataSourceAwsEc2LocalGateways(), - "aws_ec2_local_gateway_route_table": dataSourceAwsEc2LocalGatewayRouteTable(), - "aws_ec2_local_gateway_route_tables": dataSourceAwsEc2LocalGatewayRouteTables(), - "aws_ec2_local_gateway_virtual_interface": dataSourceAwsEc2LocalGatewayVirtualInterface(), - "aws_ec2_local_gateway_virtual_interface_group": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroup(), - "aws_ec2_local_gateway_virtual_interface_groups": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroups(), - "aws_ec2_managed_prefix_list": dataSourceAwsEc2ManagedPrefixList(), - "aws_ec2_spot_price": dataSourceAwsEc2SpotPrice(), - "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), - "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), - "aws_ec2_transit_gateway_peering_attachment": dataSourceAwsEc2TransitGatewayPeeringAttachment(), - "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), - "aws_ec2_transit_gateway_route_tables": dataSourceAwsEc2TransitGatewayRouteTables(), - "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), - "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), - "aws_ecr_authorization_token": dataSourceAwsEcrAuthorizationToken(), - "aws_ecr_image": dataSourceAwsEcrImage(), - "aws_ecr_repository": dataSourceAwsEcrRepository(), - "aws_ecs_cluster": dataSourceAwsEcsCluster(), - "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), - "aws_ecs_service": dataSourceAwsEcsService(), - "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), - "aws_customer_gateway": dataSourceAwsCustomerGateway(), - "aws_efs_access_point": dataSourceAwsEfsAccessPoint(), - "aws_efs_access_points": dataSourceAwsEfsAccessPoints(), - "aws_efs_file_system": dataSourceAwsEfsFileSystem(), - "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), - "aws_eip": dataSourceAwsEip(), - "aws_eks_addon": dataSourceAwsEksAddon(), - "aws_eks_cluster": dataSourceAwsEksCluster(), - "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), - "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), - "aws_elastic_beanstalk_solution_stack": 
dataSourceAwsElasticBeanstalkSolutionStack(), - "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), - "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), - "aws_elasticache_user": dataSourceAwsElastiCacheUser(), - "aws_elasticsearch_domain": dataSourceAwsElasticSearchDomain(), - "aws_elb": dataSourceAwsElb(), - "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), - "aws_elb_service_account": dataSourceAwsElbServiceAccount(), - "aws_globalaccelerator_accelerator": dataSourceAwsGlobalAcceleratorAccelerator(), - "aws_glue_connection": dataSourceAwsGlueConnection(), - "aws_glue_data_catalog_encryption_settings": dataSourceAwsGlueDataCatalogEncryptionSettings(), - "aws_glue_script": dataSourceAwsGlueScript(), - "aws_guardduty_detector": dataSourceAwsGuarddutyDetector(), - "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), - "aws_iam_group": dataSourceAwsIAMGroup(), - "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), - "aws_iam_policy": dataSourceAwsIAMPolicy(), - "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), - "aws_iam_role": dataSourceAwsIAMRole(), - "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), - "aws_iam_session_context": dataSourceAwsIAMSessionContext(), - "aws_iam_user": dataSourceAwsIAMUser(), - "aws_identitystore_group": dataSourceAwsIdentityStoreGroup(), - "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), - "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), - "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), - "aws_imagebuilder_image": dataSourceAwsImageBuilderImage(), - "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), - "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), - "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), - "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), - "aws_instance": dataSourceAwsInstance(), - "aws_instances": dataSourceAwsInstances(), - "aws_internet_gateway": dataSourceAwsInternetGateway(), - "aws_iot_endpoint": dataSourceAwsIotEndpoint(), - "aws_ip_ranges": dataSourceAwsIPRanges(), - "aws_kinesis_stream": dataSourceAwsKinesisStream(), - "aws_kinesis_stream_consumer": dataSourceAwsKinesisStreamConsumer(), - "aws_kms_alias": dataSourceAwsKmsAlias(), - "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), - "aws_kms_key": dataSourceAwsKmsKey(), - "aws_kms_public_key": dataSourceAwsKmsPublicKey(), - "aws_kms_secret": dataSourceAwsKmsSecret(), - "aws_kms_secrets": dataSourceAwsKmsSecrets(), - "aws_lakeformation_data_lake_settings": dataSourceAwsLakeFormationDataLakeSettings(), - "aws_lakeformation_permissions": dataSourceAwsLakeFormationPermissions(), - "aws_lakeformation_resource": dataSourceAwsLakeFormationResource(), - "aws_lambda_alias": dataSourceAwsLambdaAlias(), - "aws_lambda_code_signing_config": dataSourceAwsLambdaCodeSigningConfig(), - "aws_lambda_function": dataSourceAwsLambdaFunction(), - "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), - "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), - "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), - "aws_launch_template": dataSourceAwsLaunchTemplate(), - "aws_lex_bot_alias": dataSourceAwsLexBotAlias(), - "aws_lex_bot": dataSourceAwsLexBot(), - "aws_lex_intent": dataSourceAwsLexIntent(), - "aws_lex_slot_type": dataSourceAwsLexSlotType(), - "aws_mq_broker": dataSourceAwsMqBroker(), - 
"aws_msk_cluster": dataSourceAwsMskCluster(), - "aws_msk_configuration": dataSourceAwsMskConfiguration(), - "aws_nat_gateway": dataSourceAwsNatGateway(), - "aws_neptune_orderable_db_instance": dataSourceAwsNeptuneOrderableDbInstance(), - "aws_neptune_engine_version": dataSourceAwsNeptuneEngineVersion(), - "aws_network_acls": dataSourceAwsNetworkAcls(), - "aws_network_interface": dataSourceAwsNetworkInterface(), - "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), - "aws_organizations_delegated_administrators": dataSourceAwsOrganizationsDelegatedAdministrators(), - "aws_organizations_delegated_services": dataSourceAwsOrganizationsDelegatedServices(), - "aws_organizations_organization": dataSourceAwsOrganizationsOrganization(), - "aws_organizations_organizational_units": dataSourceAwsOrganizationsOrganizationalUnits(), - "aws_outposts_outpost": dataSourceAwsOutpostsOutpost(), - "aws_outposts_outpost_instance_type": dataSourceAwsOutpostsOutpostInstanceType(), - "aws_outposts_outpost_instance_types": dataSourceAwsOutpostsOutpostInstanceTypes(), - "aws_outposts_outposts": dataSourceAwsOutpostsOutposts(), - "aws_outposts_site": dataSourceAwsOutpostsSite(), - "aws_outposts_sites": dataSourceAwsOutpostsSites(), - "aws_partition": dataSourceAwsPartition(), - "aws_prefix_list": dataSourceAwsPrefixList(), - "aws_pricing_product": dataSourceAwsPricingProduct(), - "aws_qldb_ledger": dataSourceAwsQLDBLedger(), - "aws_ram_resource_share": dataSourceAwsRamResourceShare(), - "aws_rds_certificate": dataSourceAwsRdsCertificate(), - "aws_rds_cluster": dataSourceAwsRdsCluster(), - "aws_rds_engine_version": dataSourceAwsRdsEngineVersion(), - "aws_rds_orderable_db_instance": dataSourceAwsRdsOrderableDbInstance(), - "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), - "aws_redshift_orderable_cluster": dataSourceAwsRedshiftOrderableCluster(), - "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), - "aws_region": dataSourceAwsRegion(), - "aws_regions": dataSourceAwsRegions(), - "aws_resourcegroupstaggingapi_resources": dataSourceAwsResourceGroupsTaggingAPIResources(), - "aws_route": dataSourceAwsRoute(), - "aws_route_table": dataSourceAwsRouteTable(), - "aws_route_tables": dataSourceAwsRouteTables(), - "aws_route53_delegation_set": dataSourceAwsDelegationSet(), - "aws_route53_resolver_endpoint": dataSourceAwsRoute53ResolverEndpoint(), - "aws_route53_resolver_rule": dataSourceAwsRoute53ResolverRule(), - "aws_route53_resolver_rules": dataSourceAwsRoute53ResolverRules(), - "aws_route53_zone": dataSourceAwsRoute53Zone(), - "aws_s3_bucket": dataSourceAwsS3Bucket(), - "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), - "aws_s3_bucket_objects": dataSourceAwsS3BucketObjects(), - "aws_sagemaker_prebuilt_ecr_image": dataSourceAwsSageMakerPrebuiltECRImage(), - "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), - "aws_secretsmanager_secret_rotation": dataSourceAwsSecretsManagerSecretRotation(), - "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), - "aws_servicecatalog_constraint": dataSourceAwsServiceCatalogConstraint(), - "aws_servicecatalog_launch_paths": dataSourceAwsServiceCatalogLaunchPaths(), - "aws_servicecatalog_portfolio_constraints": dataSourceAwsServiceCatalogPortfolioConstraints(), - "aws_servicecatalog_portfolio": dataSourceAwsServiceCatalogPortfolio(), - "aws_servicecatalog_product": dataSourceAwsServiceCatalogProduct(), - "aws_servicequotas_service": dataSourceAwsServiceQuotasService(), - "aws_servicequotas_service_quota": 
dataSourceAwsServiceQuotasServiceQuota(), - "aws_service_discovery_dns_namespace": dataSourceServiceDiscoveryDnsNamespace(), - "aws_sfn_activity": dataSourceAwsSfnActivity(), - "aws_sfn_state_machine": dataSourceAwsSfnStateMachine(), - "aws_signer_signing_job": dataSourceAwsSignerSigningJob(), - "aws_signer_signing_profile": dataSourceAwsSignerSigningProfile(), - "aws_sns_topic": dataSourceAwsSnsTopic(), - "aws_sqs_queue": dataSourceAwsSqsQueue(), - "aws_ssm_document": dataSourceAwsSsmDocument(), - "aws_ssm_parameter": dataSourceAwsSsmParameter(), - "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), - "aws_ssoadmin_instances": dataSourceAwsSsoAdminInstances(), - "aws_ssoadmin_permission_set": dataSourceAwsSsoAdminPermissionSet(), - "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), - "aws_subnet": dataSourceAwsSubnet(), - "aws_subnet_ids": dataSourceAwsSubnetIDs(), - "aws_transfer_server": dataSourceAwsTransferServer(), - "aws_vpcs": dataSourceAwsVpcs(), - "aws_security_group": dataSourceAwsSecurityGroup(), - "aws_security_groups": dataSourceAwsSecurityGroups(), - "aws_vpc": dataSourceAwsVpc(), - "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), - "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), - "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), - "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), - "aws_vpc_peering_connections": dataSourceAwsVpcPeeringConnections(), - "aws_vpn_gateway": dataSourceAwsVpnGateway(), - "aws_waf_ipset": dataSourceAwsWafIpSet(), - "aws_waf_rule": dataSourceAwsWafRule(), - "aws_waf_rate_based_rule": dataSourceAwsWafRateBasedRule(), - "aws_waf_web_acl": dataSourceAwsWafWebAcl(), - "aws_wafregional_ipset": dataSourceAwsWafRegionalIpSet(), - "aws_wafregional_rule": dataSourceAwsWafRegionalRule(), - "aws_wafregional_rate_based_rule": dataSourceAwsWafRegionalRateBasedRule(), - "aws_wafregional_web_acl": dataSourceAwsWafRegionalWebAcl(), - "aws_wafv2_ip_set": dataSourceAwsWafv2IPSet(), - "aws_wafv2_regex_pattern_set": dataSourceAwsWafv2RegexPatternSet(), - "aws_wafv2_rule_group": dataSourceAwsWafv2RuleGroup(), - "aws_wafv2_web_acl": dataSourceAwsWafv2WebACL(), - "aws_workspaces_bundle": dataSourceAwsWorkspacesBundle(), - "aws_workspaces_directory": dataSourceAwsWorkspacesDirectory(), - "aws_workspaces_image": dataSourceAwsWorkspacesImage(), - "aws_workspaces_workspace": dataSourceAwsWorkspacesWorkspace(), - - // Adding the Aliases for the ALB -> LB Rename - "aws_lb": dataSourceAwsLb(), - "aws_alb": dataSourceAwsLb(), - "aws_lb_listener": dataSourceAwsLbListener(), - "aws_alb_listener": dataSourceAwsLbListener(), - "aws_lb_target_group": dataSourceAwsLbTargetGroup(), - "aws_alb_target_group": dataSourceAwsLbTargetGroup(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "aws_accessanalyzer_analyzer": resourceAwsAccessAnalyzerAnalyzer(), - "aws_acm_certificate": resourceAwsAcmCertificate(), - "aws_acm_certificate_validation": resourceAwsAcmCertificateValidation(), - "aws_acmpca_certificate_authority": resourceAwsAcmpcaCertificateAuthority(), - "aws_acmpca_certificate_authority_certificate": resourceAwsAcmpcaCertificateAuthorityCertificate(), - "aws_acmpca_certificate": resourceAwsAcmpcaCertificate(), - "aws_ami": resourceAwsAmi(), - "aws_ami_copy": resourceAwsAmiCopy(), - "aws_ami_from_instance": resourceAwsAmiFromInstance(), - "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), - "aws_amplify_app": resourceAwsAmplifyApp(), - "aws_amplify_backend_environment": 
resourceAwsAmplifyBackendEnvironment(), - "aws_amplify_branch": resourceAwsAmplifyBranch(), - "aws_amplify_domain_association": resourceAwsAmplifyDomainAssociation(), - "aws_amplify_webhook": resourceAwsAmplifyWebhook(), - "aws_api_gateway_account": resourceAwsApiGatewayAccount(), - "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), - "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), - "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), - "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), - "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), - "aws_api_gateway_documentation_part": resourceAwsApiGatewayDocumentationPart(), - "aws_api_gateway_documentation_version": resourceAwsApiGatewayDocumentationVersion(), - "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), - "aws_api_gateway_gateway_response": resourceAwsApiGatewayGatewayResponse(), - "aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), - "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), - "aws_api_gateway_method": resourceAwsApiGatewayMethod(), - "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), - "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), - "aws_api_gateway_model": resourceAwsApiGatewayModel(), - "aws_api_gateway_request_validator": resourceAwsApiGatewayRequestValidator(), - "aws_api_gateway_resource": resourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), - "aws_api_gateway_rest_api_policy": resourceAwsApiGatewayRestApiPolicy(), - "aws_api_gateway_stage": resourceAwsApiGatewayStage(), - "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), - "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), - "aws_api_gateway_vpc_link": resourceAwsApiGatewayVpcLink(), - "aws_apigatewayv2_api": resourceAwsApiGatewayV2Api(), - "aws_apigatewayv2_api_mapping": resourceAwsApiGatewayV2ApiMapping(), - "aws_apigatewayv2_authorizer": resourceAwsApiGatewayV2Authorizer(), - "aws_apigatewayv2_deployment": resourceAwsApiGatewayV2Deployment(), - "aws_apigatewayv2_domain_name": resourceAwsApiGatewayV2DomainName(), - "aws_apigatewayv2_integration": resourceAwsApiGatewayV2Integration(), - "aws_apigatewayv2_integration_response": resourceAwsApiGatewayV2IntegrationResponse(), - "aws_apigatewayv2_model": resourceAwsApiGatewayV2Model(), - "aws_apigatewayv2_route": resourceAwsApiGatewayV2Route(), - "aws_apigatewayv2_route_response": resourceAwsApiGatewayV2RouteResponse(), - "aws_apigatewayv2_stage": resourceAwsApiGatewayV2Stage(), - "aws_apigatewayv2_vpc_link": resourceAwsApiGatewayV2VpcLink(), - "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), - "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), - "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), - "aws_appautoscaling_scheduled_action": resourceAwsAppautoscalingScheduledAction(), - "aws_appconfig_application": resourceAwsAppconfigApplication(), - "aws_appconfig_configuration_profile": resourceAwsAppconfigConfigurationProfile(), - "aws_appconfig_deployment": resourceAwsAppconfigDeployment(), - "aws_appconfig_deployment_strategy": resourceAwsAppconfigDeploymentStrategy(), - "aws_appconfig_environment": resourceAwsAppconfigEnvironment(), - "aws_appconfig_hosted_configuration_version": resourceAwsAppconfigHostedConfigurationVersion(), - "aws_appmesh_gateway_route": resourceAwsAppmeshGatewayRoute(), 
- "aws_appmesh_mesh": resourceAwsAppmeshMesh(), - "aws_appmesh_route": resourceAwsAppmeshRoute(), - "aws_appmesh_virtual_gateway": resourceAwsAppmeshVirtualGateway(), - "aws_appmesh_virtual_node": resourceAwsAppmeshVirtualNode(), - "aws_appmesh_virtual_router": resourceAwsAppmeshVirtualRouter(), - "aws_appmesh_virtual_service": resourceAwsAppmeshVirtualService(), - "aws_apprunner_auto_scaling_configuration_version": resourceAwsAppRunnerAutoScalingConfigurationVersion(), - "aws_apprunner_connection": resourceAwsAppRunnerConnection(), - "aws_apprunner_custom_domain_association": resourceAwsAppRunnerCustomDomainAssociation(), - "aws_apprunner_service": resourceAwsAppRunnerService(), - "aws_appsync_api_key": resourceAwsAppsyncApiKey(), - "aws_appsync_datasource": resourceAwsAppsyncDatasource(), - "aws_appsync_function": resourceAwsAppsyncFunction(), - "aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(), - "aws_appsync_resolver": resourceAwsAppsyncResolver(), - "aws_athena_database": resourceAwsAthenaDatabase(), - "aws_athena_named_query": resourceAwsAthenaNamedQuery(), - "aws_athena_workgroup": resourceAwsAthenaWorkgroup(), - "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), - "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), - "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), - "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), - "aws_autoscalingplans_scaling_plan": resourceAwsAutoScalingPlansScalingPlan(), - "aws_backup_global_settings": resourceAwsBackupGlobalSettings(), - "aws_backup_plan": resourceAwsBackupPlan(), - "aws_backup_region_settings": resourceAwsBackupRegionSettings(), - "aws_backup_selection": resourceAwsBackupSelection(), - "aws_backup_vault": resourceAwsBackupVault(), - "aws_backup_vault_notifications": resourceAwsBackupVaultNotifications(), - "aws_backup_vault_policy": resourceAwsBackupVaultPolicy(), - "aws_budgets_budget": resourceAwsBudgetsBudget(), - "aws_budgets_budget_action": resourceAwsBudgetsBudgetAction(), - "aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(), - "aws_cloudformation_stack": resourceAwsCloudFormationStack(), - "aws_cloudformation_stack_set": resourceAwsCloudFormationStackSet(), - "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), - "aws_cloudformation_type": resourceAwsCloudFormationType(), - "aws_cloudfront_cache_policy": resourceAwsCloudFrontCachePolicy(), - "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), - "aws_cloudfront_function": resourceAwsCloudFrontFunction(), - "aws_cloudfront_key_group": resourceAwsCloudFrontKeyGroup(), - "aws_cloudfront_monitoring_subscription": resourceAwsCloudFrontMonitoringSubscription(), - "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), - "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), - "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), - "aws_cloudfront_realtime_log_config": resourceAwsCloudFrontRealtimeLogConfig(), - "aws_cloudtrail": resourceAwsCloudTrail(), - "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), - "aws_cloudwatch_event_bus_policy": resourceAwsCloudWatchEventBusPolicy(), - "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), - "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), - "aws_cloudwatch_event_target": 
resourceAwsCloudWatchEventTarget(), - "aws_cloudwatch_event_archive": resourceAwsCloudWatchEventArchive(), - "aws_cloudwatch_event_connection": resourceAwsCloudWatchEventConnection(), - "aws_cloudwatch_event_api_destination": resourceAwsCloudWatchEventApiDestination(), - "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), - "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), - "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), - "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), - "aws_cloudwatch_log_resource_policy": resourceAwsCloudWatchLogResourcePolicy(), - "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), - "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), - "aws_config_aggregate_authorization": resourceAwsConfigAggregateAuthorization(), - "aws_config_config_rule": resourceAwsConfigConfigRule(), - "aws_config_configuration_aggregator": resourceAwsConfigConfigurationAggregator(), - "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), - "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), - "aws_config_conformance_pack": resourceAwsConfigConformancePack(), - "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), - "aws_config_organization_conformance_pack": resourceAwsConfigOrganizationConformancePack(), - "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), - "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), - "aws_config_remediation_configuration": resourceAwsConfigRemediationConfiguration(), - "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), - "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), - "aws_cognito_identity_provider": resourceAwsCognitoIdentityProvider(), - "aws_cognito_resource_server": resourceAwsCognitoResourceServer(), - "aws_cognito_user_group": resourceAwsCognitoUserGroup(), - "aws_cognito_user_pool": resourceAwsCognitoUserPool(), - "aws_cognito_user_pool_client": resourceAwsCognitoUserPoolClient(), - "aws_cognito_user_pool_domain": resourceAwsCognitoUserPoolDomain(), - "aws_cognito_user_pool_ui_customization": resourceAwsCognitoUserPoolUICustomization(), - "aws_cloudhsm_v2_cluster": resourceAwsCloudHsmV2Cluster(), - "aws_cloudhsm_v2_hsm": resourceAwsCloudHsmV2Hsm(), - "aws_cloudwatch_composite_alarm": resourceAwsCloudWatchCompositeAlarm(), - "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), - "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), - "aws_cloudwatch_metric_stream": resourceAwsCloudWatchMetricStream(), - "aws_cloudwatch_query_definition": resourceAwsCloudWatchQueryDefinition(), - "aws_codedeploy_app": resourceAwsCodeDeployApp(), - "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), - "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), - "aws_codecommit_repository": resourceAwsCodeCommitRepository(), - "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), - "aws_codeartifact_domain": resourceAwsCodeArtifactDomain(), - "aws_codeartifact_domain_permissions_policy": resourceAwsCodeArtifactDomainPermissionsPolicy(), - "aws_codeartifact_repository": resourceAwsCodeArtifactRepository(), - "aws_codeartifact_repository_permissions_policy": resourceAwsCodeArtifactRepositoryPermissionsPolicy(), - "aws_codebuild_project": resourceAwsCodeBuildProject(), - 
"aws_codebuild_report_group": resourceAwsCodeBuildReportGroup(), - "aws_codebuild_source_credential": resourceAwsCodeBuildSourceCredential(), - "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), - "aws_codepipeline": resourceAwsCodePipeline(), - "aws_codepipeline_webhook": resourceAwsCodePipelineWebhook(), - "aws_codestarconnections_connection": resourceAwsCodeStarConnectionsConnection(), - "aws_codestarconnections_host": resourceAwsCodeStarConnectionsHost(), - "aws_codestarnotifications_notification_rule": resourceAwsCodeStarNotificationsNotificationRule(), - "aws_cur_report_definition": resourceAwsCurReportDefinition(), - "aws_customer_gateway": resourceAwsCustomerGateway(), - "aws_datapipeline_pipeline": resourceAwsDataPipelinePipeline(), - "aws_datasync_agent": resourceAwsDataSyncAgent(), - "aws_datasync_location_efs": resourceAwsDataSyncLocationEfs(), - "aws_datasync_location_fsx_windows_file_system": resourceAwsDataSyncLocationFsxWindowsFileSystem(), - "aws_datasync_location_nfs": resourceAwsDataSyncLocationNfs(), - "aws_datasync_location_s3": resourceAwsDataSyncLocationS3(), - "aws_datasync_location_smb": resourceAwsDataSyncLocationSmb(), - "aws_datasync_task": resourceAwsDataSyncTask(), - "aws_dax_cluster": resourceAwsDaxCluster(), - "aws_dax_parameter_group": resourceAwsDaxParameterGroup(), - "aws_dax_subnet_group": resourceAwsDaxSubnetGroup(), - "aws_db_cluster_snapshot": resourceAwsDbClusterSnapshot(), - "aws_db_event_subscription": resourceAwsDbEventSubscription(), - "aws_db_instance": resourceAwsDbInstance(), - "aws_db_instance_role_association": resourceAwsDbInstanceRoleAssociation(), - "aws_db_option_group": resourceAwsDbOptionGroup(), - "aws_db_parameter_group": resourceAwsDbParameterGroup(), - "aws_db_proxy": resourceAwsDbProxy(), - "aws_db_proxy_default_target_group": resourceAwsDbProxyDefaultTargetGroup(), - "aws_db_proxy_endpoint": resourceAwsDbProxyEndpoint(), - "aws_db_proxy_target": resourceAwsDbProxyTarget(), - "aws_db_security_group": resourceAwsDbSecurityGroup(), - "aws_db_snapshot": resourceAwsDbSnapshot(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_devicefarm_project": resourceAwsDevicefarmProject(), - "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), - "aws_directory_service_conditional_forwarder": resourceAwsDirectoryServiceConditionalForwarder(), - "aws_directory_service_log_subscription": resourceAwsDirectoryServiceLogSubscription(), - "aws_dlm_lifecycle_policy": resourceAwsDlmLifecyclePolicy(), - "aws_dms_certificate": resourceAwsDmsCertificate(), - "aws_dms_endpoint": resourceAwsDmsEndpoint(), - "aws_dms_event_subscription": resourceAwsDmsEventSubscription(), - "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), - "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), - "aws_dms_replication_task": resourceAwsDmsReplicationTask(), - "aws_docdb_cluster": resourceAwsDocDBCluster(), - "aws_docdb_cluster_instance": resourceAwsDocDBClusterInstance(), - "aws_docdb_cluster_parameter_group": resourceAwsDocDBClusterParameterGroup(), - "aws_docdb_cluster_snapshot": resourceAwsDocDBClusterSnapshot(), - "aws_docdb_subnet_group": resourceAwsDocDBSubnetGroup(), - "aws_dx_bgp_peer": resourceAwsDxBgpPeer(), - "aws_dx_connection": resourceAwsDxConnection(), - "aws_dx_connection_association": resourceAwsDxConnectionAssociation(), - "aws_dx_gateway": resourceAwsDxGateway(), - "aws_dx_gateway_association": resourceAwsDxGatewayAssociation(), - "aws_dx_gateway_association_proposal": 
resourceAwsDxGatewayAssociationProposal(), - "aws_dx_hosted_private_virtual_interface": resourceAwsDxHostedPrivateVirtualInterface(), - "aws_dx_hosted_private_virtual_interface_accepter": resourceAwsDxHostedPrivateVirtualInterfaceAccepter(), - "aws_dx_hosted_public_virtual_interface": resourceAwsDxHostedPublicVirtualInterface(), - "aws_dx_hosted_public_virtual_interface_accepter": resourceAwsDxHostedPublicVirtualInterfaceAccepter(), - "aws_dx_hosted_transit_virtual_interface": resourceAwsDxHostedTransitVirtualInterface(), - "aws_dx_hosted_transit_virtual_interface_accepter": resourceAwsDxHostedTransitVirtualInterfaceAccepter(), - "aws_dx_lag": resourceAwsDxLag(), - "aws_dx_private_virtual_interface": resourceAwsDxPrivateVirtualInterface(), - "aws_dx_public_virtual_interface": resourceAwsDxPublicVirtualInterface(), - "aws_dx_transit_virtual_interface": resourceAwsDxTransitVirtualInterface(), - "aws_dynamodb_table": resourceAwsDynamoDbTable(), - "aws_dynamodb_table_item": resourceAwsDynamoDbTableItem(), - "aws_dynamodb_global_table": resourceAwsDynamoDbGlobalTable(), - "aws_dynamodb_kinesis_streaming_destination": resourceAwsDynamoDbKinesisStreamingDestination(), - "aws_ebs_default_kms_key": resourceAwsEbsDefaultKmsKey(), - "aws_ebs_encryption_by_default": resourceAwsEbsEncryptionByDefault(), - "aws_ebs_snapshot": resourceAwsEbsSnapshot(), - "aws_ebs_snapshot_copy": resourceAwsEbsSnapshotCopy(), - "aws_ebs_snapshot_import": resourceAwsEbsSnapshotImport(), - "aws_ebs_volume": resourceAwsEbsVolume(), - "aws_ec2_availability_zone_group": resourceAwsEc2AvailabilityZoneGroup(), - "aws_ec2_capacity_reservation": resourceAwsEc2CapacityReservation(), - "aws_ec2_carrier_gateway": resourceAwsEc2CarrierGateway(), - "aws_ec2_client_vpn_authorization_rule": resourceAwsEc2ClientVpnAuthorizationRule(), - "aws_ec2_client_vpn_endpoint": resourceAwsEc2ClientVpnEndpoint(), - "aws_ec2_client_vpn_network_association": resourceAwsEc2ClientVpnNetworkAssociation(), - "aws_ec2_client_vpn_route": resourceAwsEc2ClientVpnRoute(), - "aws_ec2_fleet": resourceAwsEc2Fleet(), - "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), - "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), - "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), - "aws_ec2_tag": resourceAwsEc2Tag(), - "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), - "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), - "aws_ec2_traffic_mirror_target": resourceAwsEc2TrafficMirrorTarget(), - "aws_ec2_traffic_mirror_session": resourceAwsEc2TrafficMirrorSession(), - "aws_ec2_transit_gateway": resourceAwsEc2TransitGateway(), - "aws_ec2_transit_gateway_peering_attachment": resourceAwsEc2TransitGatewayPeeringAttachment(), - "aws_ec2_transit_gateway_peering_attachment_accepter": resourceAwsEc2TransitGatewayPeeringAttachmentAccepter(), - "aws_ec2_transit_gateway_prefix_list_reference": resourceAwsEc2TransitGatewayPrefixListReference(), - "aws_ec2_transit_gateway_route": resourceAwsEc2TransitGatewayRoute(), - "aws_ec2_transit_gateway_route_table": resourceAwsEc2TransitGatewayRouteTable(), - "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), - "aws_ec2_transit_gateway_route_table_propagation": resourceAwsEc2TransitGatewayRouteTablePropagation(), - "aws_ec2_transit_gateway_vpc_attachment": resourceAwsEc2TransitGatewayVpcAttachment(), - "aws_ec2_transit_gateway_vpc_attachment_accepter": 
resourceAwsEc2TransitGatewayVpcAttachmentAccepter(), - "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), - "aws_ecrpublic_repository": resourceAwsEcrPublicRepository(), - "aws_ecr_registry_policy": resourceAwsEcrRegistryPolicy(), - "aws_ecr_replication_configuration": resourceAwsEcrReplicationConfiguration(), - "aws_ecr_repository": resourceAwsEcrRepository(), - "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), - "aws_ecs_capacity_provider": resourceAwsEcsCapacityProvider(), - "aws_ecs_cluster": resourceAwsEcsCluster(), - "aws_ecs_service": resourceAwsEcsService(), - "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), - "aws_efs_access_point": resourceAwsEfsAccessPoint(), - "aws_efs_backup_policy": resourceAwsEfsBackupPolicy(), - "aws_efs_file_system": resourceAwsEfsFileSystem(), - "aws_efs_file_system_policy": resourceAwsEfsFileSystemPolicy(), - "aws_efs_mount_target": resourceAwsEfsMountTarget(), - "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), - "aws_eip": resourceAwsEip(), - "aws_eip_association": resourceAwsEipAssociation(), - "aws_eks_cluster": resourceAwsEksCluster(), - "aws_eks_addon": resourceAwsEksAddon(), - "aws_eks_fargate_profile": resourceAwsEksFargateProfile(), - "aws_eks_identity_provider_config": resourceAwsEksIdentityProviderConfig(), - "aws_eks_node_group": resourceAwsEksNodeGroup(), - "aws_elasticache_cluster": resourceAwsElasticacheCluster(), - "aws_elasticache_global_replication_group": resourceAwsElasticacheGlobalReplicationGroup(), - "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), - "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), - "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), - "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), - "aws_elasticache_user": resourceAwsElasticacheUser(), - "aws_elasticache_user_group": resourceAwsElasticacheUserGroup(), - "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), - "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), - "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), - "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), - "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), - "aws_elasticsearch_domain_saml_options": resourceAwsElasticSearchDomainSAMLOptions(), - "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), - "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), - "aws_elb": resourceAwsElb(), - "aws_elb_attachment": resourceAwsElbAttachment(), - "aws_emr_cluster": resourceAwsEMRCluster(), - "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), - "aws_emr_instance_fleet": resourceAwsEMRInstanceFleet(), - "aws_emr_managed_scaling_policy": resourceAwsEMRManagedScalingPolicy(), - "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), - "aws_flow_log": resourceAwsFlowLog(), - "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), - "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), - "aws_fms_admin_account": resourceAwsFmsAdminAccount(), - "aws_fms_policy": resourceAwsFmsPolicy(), - "aws_gamelift_alias": resourceAwsGameliftAlias(), - "aws_gamelift_build": resourceAwsGameliftBuild(), - "aws_gamelift_fleet": resourceAwsGameliftFleet(), - 
"aws_gamelift_game_session_queue": resourceAwsGameliftGameSessionQueue(), - "aws_glacier_vault": resourceAwsGlacierVault(), - "aws_glacier_vault_lock": resourceAwsGlacierVaultLock(), - "aws_globalaccelerator_accelerator": resourceAwsGlobalAcceleratorAccelerator(), - "aws_globalaccelerator_endpoint_group": resourceAwsGlobalAcceleratorEndpointGroup(), - "aws_globalaccelerator_listener": resourceAwsGlobalAcceleratorListener(), - "aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(), - "aws_glue_catalog_table": resourceAwsGlueCatalogTable(), - "aws_glue_classifier": resourceAwsGlueClassifier(), - "aws_glue_connection": resourceAwsGlueConnection(), - "aws_glue_dev_endpoint": resourceAwsGlueDevEndpoint(), - "aws_glue_crawler": resourceAwsGlueCrawler(), - "aws_glue_data_catalog_encryption_settings": resourceAwsGlueDataCatalogEncryptionSettings(), - "aws_glue_job": resourceAwsGlueJob(), - "aws_glue_ml_transform": resourceAwsGlueMLTransform(), - "aws_glue_partition": resourceAwsGluePartition(), - "aws_glue_registry": resourceAwsGlueRegistry(), - "aws_glue_resource_policy": resourceAwsGlueResourcePolicy(), - "aws_glue_schema": resourceAwsGlueSchema(), - "aws_glue_security_configuration": resourceAwsGlueSecurityConfiguration(), - "aws_glue_trigger": resourceAwsGlueTrigger(), - "aws_glue_user_defined_function": resourceAwsGlueUserDefinedFunction(), - "aws_glue_workflow": resourceAwsGlueWorkflow(), - "aws_guardduty_detector": resourceAwsGuardDutyDetector(), - "aws_guardduty_filter": resourceAwsGuardDutyFilter(), - "aws_guardduty_invite_accepter": resourceAwsGuardDutyInviteAccepter(), - "aws_guardduty_ipset": resourceAwsGuardDutyIpset(), - "aws_guardduty_member": resourceAwsGuardDutyMember(), - "aws_guardduty_organization_admin_account": resourceAwsGuardDutyOrganizationAdminAccount(), - "aws_guardduty_organization_configuration": resourceAwsGuardDutyOrganizationConfiguration(), - "aws_guardduty_publishing_destination": resourceAwsGuardDutyPublishingDestination(), - "aws_guardduty_threatintelset": resourceAwsGuardDutyThreatintelset(), - "aws_iam_access_key": resourceAwsIamAccessKey(), - "aws_iam_account_alias": resourceAwsIamAccountAlias(), - "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), - "aws_iam_group_policy": resourceAwsIamGroupPolicy(), - "aws_iam_group": resourceAwsIamGroup(), - "aws_iam_group_membership": resourceAwsIamGroupMembership(), - "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), - "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), - "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), - "aws_iam_policy": resourceAwsIamPolicy(), - "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), - "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), - "aws_iam_role_policy": resourceAwsIamRolePolicy(), - "aws_iam_role": resourceAwsIamRole(), - "aws_iam_saml_provider": resourceAwsIamSamlProvider(), - "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), - "aws_iam_service_linked_role": resourceAwsIamServiceLinkedRole(), - "aws_iam_user_group_membership": resourceAwsIamUserGroupMembership(), - "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), - "aws_iam_user_policy": resourceAwsIamUserPolicy(), - "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), - "aws_iam_user": resourceAwsIamUser(), - "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), - "aws_imagebuilder_component": resourceAwsImageBuilderComponent(), - 
"aws_imagebuilder_distribution_configuration": resourceAwsImageBuilderDistributionConfiguration(), - "aws_imagebuilder_image": resourceAwsImageBuilderImage(), - "aws_imagebuilder_image_pipeline": resourceAwsImageBuilderImagePipeline(), - "aws_imagebuilder_image_recipe": resourceAwsImageBuilderImageRecipe(), - "aws_imagebuilder_infrastructure_configuration": resourceAwsImageBuilderInfrastructureConfiguration(), - "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), - "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), - "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), - "aws_instance": resourceAwsInstance(), - "aws_internet_gateway": resourceAwsInternetGateway(), - "aws_iot_certificate": resourceAwsIotCertificate(), - "aws_iot_policy": resourceAwsIotPolicy(), - "aws_iot_policy_attachment": resourceAwsIotPolicyAttachment(), - "aws_iot_thing": resourceAwsIotThing(), - "aws_iot_thing_principal_attachment": resourceAwsIotThingPrincipalAttachment(), - "aws_iot_thing_type": resourceAwsIotThingType(), - "aws_iot_topic_rule": resourceAwsIotTopicRule(), - "aws_iot_role_alias": resourceAwsIotRoleAlias(), - "aws_key_pair": resourceAwsKeyPair(), - "aws_kinesis_analytics_application": resourceAwsKinesisAnalyticsApplication(), - "aws_kinesisanalyticsv2_application": resourceAwsKinesisAnalyticsV2Application(), - "aws_kinesisanalyticsv2_application_snapshot": resourceAwsKinesisAnalyticsV2ApplicationSnapshot(), - "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), - "aws_kinesis_stream": resourceAwsKinesisStream(), - "aws_kinesis_stream_consumer": resourceAwsKinesisStreamConsumer(), - "aws_kinesis_video_stream": resourceAwsKinesisVideoStream(), - "aws_kms_alias": resourceAwsKmsAlias(), - "aws_kms_external_key": resourceAwsKmsExternalKey(), - "aws_kms_grant": resourceAwsKmsGrant(), - "aws_kms_key": resourceAwsKmsKey(), - "aws_kms_ciphertext": resourceAwsKmsCiphertext(), - "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), - "aws_lakeformation_permissions": resourceAwsLakeFormationPermissions(), - "aws_lakeformation_resource": resourceAwsLakeFormationResource(), - "aws_lambda_alias": resourceAwsLambdaAlias(), - "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), - "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), - "aws_lambda_function_event_invoke_config": resourceAwsLambdaFunctionEventInvokeConfig(), - "aws_lambda_function": resourceAwsLambdaFunction(), - "aws_lambda_layer_version": resourceAwsLambdaLayerVersion(), - "aws_lambda_permission": resourceAwsLambdaPermission(), - "aws_lambda_provisioned_concurrency_config": resourceAwsLambdaProvisionedConcurrencyConfig(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_launch_template": resourceAwsLaunchTemplate(), - "aws_lex_bot": resourceAwsLexBot(), - "aws_lex_bot_alias": resourceAwsLexBotAlias(), - "aws_lex_intent": resourceAwsLexIntent(), - "aws_lex_slot_type": resourceAwsLexSlotType(), - "aws_licensemanager_association": resourceAwsLicenseManagerAssociation(), - "aws_licensemanager_license_configuration": resourceAwsLicenseManagerLicenseConfiguration(), - "aws_lightsail_domain": resourceAwsLightsailDomain(), - "aws_lightsail_instance": resourceAwsLightsailInstance(), - "aws_lightsail_instance_public_ports": resourceAwsLightsailInstancePublicPorts(), - "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), - "aws_lightsail_static_ip": 
resourceAwsLightsailStaticIp(), - "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), - "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), - "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), - "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), - "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), - "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), - "aws_macie2_account": resourceAwsMacie2Account(), - "aws_macie2_classification_job": resourceAwsMacie2ClassificationJob(), - "aws_macie2_custom_data_identifier": resourceAwsMacie2CustomDataIdentifier(), - "aws_macie2_findings_filter": resourceAwsMacie2FindingsFilter(), - "aws_macie2_invitation_accepter": resourceAwsMacie2InvitationAccepter(), - "aws_macie2_member": resourceAwsMacie2Member(), - "aws_macie2_organization_admin_account": resourceAwsMacie2OrganizationAdminAccount(), - "aws_macie_member_account_association": resourceAwsMacieMemberAccountAssociation(), - "aws_macie_s3_bucket_association": resourceAwsMacieS3BucketAssociation(), - "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), - "aws_mq_broker": resourceAwsMqBroker(), - "aws_mq_configuration": resourceAwsMqConfiguration(), - "aws_media_convert_queue": resourceAwsMediaConvertQueue(), - "aws_media_package_channel": resourceAwsMediaPackageChannel(), - "aws_media_store_container": resourceAwsMediaStoreContainer(), - "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), - "aws_msk_cluster": resourceAwsMskCluster(), - "aws_msk_configuration": resourceAwsMskConfiguration(), - "aws_msk_scram_secret_association": resourceAwsMskScramSecretAssociation(), - "aws_mwaa_environment": resourceAwsMwaaEnvironment(), - "aws_nat_gateway": resourceAwsNatGateway(), - "aws_network_acl": resourceAwsNetworkAcl(), - "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), - "aws_neptune_cluster": resourceAwsNeptuneCluster(), - "aws_neptune_cluster_endpoint": resourceAwsNeptuneClusterEndpoint(), - "aws_neptune_cluster_instance": resourceAwsNeptuneClusterInstance(), - "aws_neptune_cluster_parameter_group": resourceAwsNeptuneClusterParameterGroup(), - "aws_neptune_cluster_snapshot": resourceAwsNeptuneClusterSnapshot(), - "aws_neptune_event_subscription": resourceAwsNeptuneEventSubscription(), - "aws_neptune_parameter_group": resourceAwsNeptuneParameterGroup(), - "aws_neptune_subnet_group": resourceAwsNeptuneSubnetGroup(), - "aws_network_acl_rule": resourceAwsNetworkAclRule(), - "aws_network_interface": resourceAwsNetworkInterface(), - "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), - "aws_networkfirewall_firewall": resourceAwsNetworkFirewallFirewall(), - "aws_networkfirewall_firewall_policy": resourceAwsNetworkFirewallFirewallPolicy(), - "aws_networkfirewall_logging_configuration": resourceAwsNetworkFirewallLoggingConfiguration(), - "aws_networkfirewall_resource_policy": resourceAwsNetworkFirewallResourcePolicy(), - "aws_networkfirewall_rule_group": resourceAwsNetworkFirewallRuleGroup(), - "aws_opsworks_application": resourceAwsOpsworksApplication(), - "aws_opsworks_stack": resourceAwsOpsworksStack(), - "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), - "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), - "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), - "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), - 
"aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), - "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), - "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), - "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), - "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), - "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), - "aws_opsworks_instance": resourceAwsOpsworksInstance(), - "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), - "aws_opsworks_permission": resourceAwsOpsworksPermission(), - "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), - "aws_organizations_organization": resourceAwsOrganizationsOrganization(), - "aws_organizations_account": resourceAwsOrganizationsAccount(), - "aws_organizations_delegated_administrator": resourceAwsOrganizationsDelegatedAdministrator(), - "aws_organizations_policy": resourceAwsOrganizationsPolicy(), - "aws_organizations_policy_attachment": resourceAwsOrganizationsPolicyAttachment(), - "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), - "aws_placement_group": resourceAwsPlacementGroup(), - "aws_prometheus_workspace": resourceAwsPrometheusWorkspace(), - "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), - "aws_qldb_ledger": resourceAwsQLDBLedger(), - "aws_quicksight_group": resourceAwsQuickSightGroup(), - "aws_quicksight_user": resourceAwsQuickSightUser(), - "aws_ram_principal_association": resourceAwsRamPrincipalAssociation(), - "aws_ram_resource_association": resourceAwsRamResourceAssociation(), - "aws_ram_resource_share": resourceAwsRamResourceShare(), - "aws_ram_resource_share_accepter": resourceAwsRamResourceShareAccepter(), - "aws_rds_cluster": resourceAwsRDSCluster(), - "aws_rds_cluster_endpoint": resourceAwsRDSClusterEndpoint(), - "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), - "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), - "aws_rds_cluster_role_association": resourceAwsRDSClusterRoleAssociation(), - "aws_rds_global_cluster": resourceAwsRDSGlobalCluster(), - "aws_redshift_cluster": resourceAwsRedshiftCluster(), - "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), - "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), - "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), - "aws_redshift_snapshot_copy_grant": resourceAwsRedshiftSnapshotCopyGrant(), - "aws_redshift_snapshot_schedule": resourceAwsRedshiftSnapshotSchedule(), - "aws_redshift_snapshot_schedule_association": resourceAwsRedshiftSnapshotScheduleAssociation(), - "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), - "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), - "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), - "aws_route53_hosted_zone_dnssec": resourceAwsRoute53HostedZoneDnssec(), - "aws_route53_key_signing_key": resourceAwsRoute53KeySigningKey(), - "aws_route53_query_log": resourceAwsRoute53QueryLog(), - "aws_route53_record": resourceAwsRoute53Record(), - "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), - "aws_route53_vpc_association_authorization": resourceAwsRoute53VPCAssociationAuthorization(), - "aws_route53_zone": resourceAwsRoute53Zone(), - "aws_route53_health_check": resourceAwsRoute53HealthCheck(), - "aws_route53_resolver_dnssec_config": resourceAwsRoute53ResolverDnssecConfig(), - "aws_route53_resolver_endpoint": resourceAwsRoute53ResolverEndpoint(), - 
"aws_route53_resolver_firewall_config": resourceAwsRoute53ResolverFirewallConfig(), - "aws_route53_resolver_firewall_domain_list": resourceAwsRoute53ResolverFirewallDomainList(), - "aws_route53_resolver_firewall_rule": resourceAwsRoute53ResolverFirewallRule(), - "aws_route53_resolver_firewall_rule_group": resourceAwsRoute53ResolverFirewallRuleGroup(), - "aws_route53_resolver_firewall_rule_group_association": resourceAwsRoute53ResolverFirewallRuleGroupAssociation(), - "aws_route53_resolver_query_log_config": resourceAwsRoute53ResolverQueryLogConfig(), - "aws_route53_resolver_query_log_config_association": resourceAwsRoute53ResolverQueryLogConfigAssociation(), - "aws_route53_resolver_rule_association": resourceAwsRoute53ResolverRuleAssociation(), - "aws_route53_resolver_rule": resourceAwsRoute53ResolverRule(), - "aws_route": resourceAwsRoute(), - "aws_route_table": resourceAwsRouteTable(), - "aws_default_route_table": resourceAwsDefaultRouteTable(), - "aws_route_table_association": resourceAwsRouteTableAssociation(), - "aws_sagemaker_app": resourceAwsSagemakerApp(), - "aws_sagemaker_app_image_config": resourceAwsSagemakerAppImageConfig(), - "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), - "aws_sagemaker_domain": resourceAwsSagemakerDomain(), - "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), - "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), - "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), - "aws_sagemaker_image": resourceAwsSagemakerImage(), - "aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(), - "aws_sagemaker_model": resourceAwsSagemakerModel(), - "aws_sagemaker_model_package_group": resourceAwsSagemakerModelPackageGroup(), - "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), - "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), - "aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(), - "aws_sagemaker_workforce": resourceAwsSagemakerWorkforce(), - "aws_sagemaker_workteam": resourceAwsSagemakerWorkteam(), - "aws_schemas_discoverer": resourceAwsSchemasDiscoverer(), - "aws_schemas_registry": resourceAwsSchemasRegistry(), - "aws_schemas_schema": resourceAwsSchemasSchema(), - "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), - "aws_secretsmanager_secret_policy": resourceAwsSecretsManagerSecretPolicy(), - "aws_secretsmanager_secret_version": resourceAwsSecretsManagerSecretVersion(), - "aws_secretsmanager_secret_rotation": resourceAwsSecretsManagerSecretRotation(), - "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), - "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), - "aws_ses_domain_identity_verification": resourceAwsSesDomainIdentityVerification(), - "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), - "aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(), - "aws_ses_email_identity": resourceAwsSesEmailIdentity(), - "aws_ses_identity_policy": resourceAwsSesIdentityPolicy(), - "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), - "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), - "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), - "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), - "aws_ses_event_destination": resourceAwsSesEventDestination(), - "aws_ses_identity_notification_topic": resourceAwsSesNotificationTopic(), - "aws_ses_template": resourceAwsSesTemplate(), - "aws_s3_access_point": 
resourceAwsS3AccessPoint(), - "aws_s3_account_public_access_block": resourceAwsS3AccountPublicAccessBlock(), - "aws_s3_bucket": resourceAwsS3Bucket(), - "aws_s3_bucket_analytics_configuration": resourceAwsS3BucketAnalyticsConfiguration(), - "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), - "aws_s3_bucket_public_access_block": resourceAwsS3BucketPublicAccessBlock(), - "aws_s3_bucket_object": resourceAwsS3BucketObject(), - "aws_s3_bucket_ownership_controls": resourceAwsS3BucketOwnershipControls(), - "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), - "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), - "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), - "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), - "aws_s3_object_copy": resourceAwsS3ObjectCopy(), - "aws_s3control_bucket": resourceAwsS3ControlBucket(), - "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), - "aws_s3control_bucket_lifecycle_configuration": resourceAwsS3ControlBucketLifecycleConfiguration(), - "aws_s3outposts_endpoint": resourceAwsS3OutpostsEndpoint(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(), - "aws_default_security_group": resourceAwsDefaultSecurityGroup(), - "aws_security_group_rule": resourceAwsSecurityGroupRule(), - "aws_securityhub_account": resourceAwsSecurityHubAccount(), - "aws_securityhub_action_target": resourceAwsSecurityHubActionTarget(), - "aws_securityhub_insight": resourceAwsSecurityHubInsight(), - "aws_securityhub_invite_accepter": resourceAwsSecurityHubInviteAccepter(), - "aws_securityhub_member": resourceAwsSecurityHubMember(), - "aws_securityhub_organization_admin_account": resourceAwsSecurityHubOrganizationAdminAccount(), - "aws_securityhub_organization_configuration": resourceAwsSecurityHubOrganizationConfiguration(), - "aws_securityhub_product_subscription": resourceAwsSecurityHubProductSubscription(), - "aws_securityhub_standards_control": resourceAwsSecurityHubStandardsControl(), - "aws_securityhub_standards_subscription": resourceAwsSecurityHubStandardsSubscription(), - "aws_servicecatalog_budget_resource_association": resourceAwsServiceCatalogBudgetResourceAssociation(), - "aws_servicecatalog_constraint": resourceAwsServiceCatalogConstraint(), - "aws_servicecatalog_organizations_access": resourceAwsServiceCatalogOrganizationsAccess(), - "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), - "aws_servicecatalog_portfolio_share": resourceAwsServiceCatalogPortfolioShare(), - "aws_servicecatalog_product": resourceAwsServiceCatalogProduct(), - "aws_servicecatalog_provisioned_product": resourceAwsServiceCatalogProvisionedProduct(), - "aws_servicecatalog_service_action": resourceAwsServiceCatalogServiceAction(), - "aws_servicecatalog_tag_option": resourceAwsServiceCatalogTagOption(), - "aws_servicecatalog_tag_option_resource_association": resourceAwsServiceCatalogTagOptionResourceAssociation(), - "aws_servicecatalog_principal_portfolio_association": resourceAwsServiceCatalogPrincipalPortfolioAssociation(), - "aws_servicecatalog_product_portfolio_association": resourceAwsServiceCatalogProductPortfolioAssociation(), - "aws_servicecatalog_provisioning_artifact": resourceAwsServiceCatalogProvisioningArtifact(), - "aws_service_discovery_http_namespace": resourceAwsServiceDiscoveryHttpNamespace(), - "aws_service_discovery_private_dns_namespace": resourceAwsServiceDiscoveryPrivateDnsNamespace(), - 
"aws_service_discovery_public_dns_namespace": resourceAwsServiceDiscoveryPublicDnsNamespace(), - "aws_service_discovery_service": resourceAwsServiceDiscoveryService(), - "aws_servicequotas_service_quota": resourceAwsServiceQuotasServiceQuota(), - "aws_shield_protection": resourceAwsShieldProtection(), - "aws_signer_signing_job": resourceAwsSignerSigningJob(), - "aws_signer_signing_profile": resourceAwsSignerSigningProfile(), - "aws_signer_signing_profile_permission": resourceAwsSignerSigningProfilePermission(), - "aws_simpledb_domain": resourceAwsSimpleDBDomain(), - "aws_ssm_activation": resourceAwsSsmActivation(), - "aws_ssm_association": resourceAwsSsmAssociation(), - "aws_ssm_document": resourceAwsSsmDocument(), - "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), - "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), - "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), - "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), - "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), - "aws_ssm_parameter": resourceAwsSsmParameter(), - "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), - "aws_ssoadmin_account_assignment": resourceAwsSsoAdminAccountAssignment(), - "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), - "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), - "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), - "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), - "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), - "aws_storagegateway_file_system_association": resourceAwsStorageGatewayFileSystemAssociation(), - "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), - "aws_storagegateway_nfs_file_share": resourceAwsStorageGatewayNfsFileShare(), - "aws_storagegateway_smb_file_share": resourceAwsStorageGatewaySmbFileShare(), - "aws_storagegateway_stored_iscsi_volume": resourceAwsStorageGatewayStoredIscsiVolume(), - "aws_storagegateway_tape_pool": resourceAwsStorageGatewayTapePool(), - "aws_storagegateway_upload_buffer": resourceAwsStorageGatewayUploadBuffer(), - "aws_storagegateway_working_storage": resourceAwsStorageGatewayWorkingStorage(), - "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), - "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), - "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), - "aws_sqs_queue": resourceAwsSqsQueue(), - "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), - "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), - "aws_sns_platform_application": resourceAwsSnsPlatformApplication(), - "aws_sns_sms_preferences": resourceAwsSnsSmsPreferences(), - "aws_sns_topic": resourceAwsSnsTopic(), - "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), - "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), - "aws_sfn_activity": resourceAwsSfnActivity(), - "aws_sfn_state_machine": resourceAwsSfnStateMachine(), - "aws_default_subnet": resourceAwsDefaultSubnet(), - "aws_subnet": resourceAwsSubnet(), - "aws_swf_domain": resourceAwsSwfDomain(), - "aws_synthetics_canary": resourceAwsSyntheticsCanary(), - "aws_timestreamwrite_database": resourceAwsTimestreamWriteDatabase(), - "aws_timestreamwrite_table": resourceAwsTimestreamWriteTable(), - "aws_transfer_server": resourceAwsTransferServer(), - "aws_transfer_ssh_key": resourceAwsTransferSshKey(), - 
"aws_transfer_user": resourceAwsTransferUser(), - "aws_volume_attachment": resourceAwsVolumeAttachment(), - "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), - "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), - "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), - "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), - "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), - "aws_vpc_peering_connection_options": resourceAwsVpcPeeringConnectionOptions(), - "aws_default_vpc": resourceAwsDefaultVpc(), - "aws_vpc": resourceAwsVpc(), - "aws_vpc_endpoint": resourceAwsVpcEndpoint(), - "aws_vpc_endpoint_connection_notification": resourceAwsVpcEndpointConnectionNotification(), - "aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(), - "aws_vpc_endpoint_subnet_association": resourceAwsVpcEndpointSubnetAssociation(), - "aws_vpc_endpoint_service": resourceAwsVpcEndpointService(), - "aws_vpc_endpoint_service_allowed_principal": resourceAwsVpcEndpointServiceAllowedPrincipal(), - "aws_vpc_ipv4_cidr_block_association": resourceAwsVpcIpv4CidrBlockAssociation(), - "aws_vpn_connection": resourceAwsVpnConnection(), - "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), - "aws_vpn_gateway": resourceAwsVpnGateway(), - "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), - "aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(), - "aws_waf_byte_match_set": resourceAwsWafByteMatchSet(), - "aws_waf_ipset": resourceAwsWafIPSet(), - "aws_waf_rate_based_rule": resourceAwsWafRateBasedRule(), - "aws_waf_regex_match_set": resourceAwsWafRegexMatchSet(), - "aws_waf_regex_pattern_set": resourceAwsWafRegexPatternSet(), - "aws_waf_rule": resourceAwsWafRule(), - "aws_waf_rule_group": resourceAwsWafRuleGroup(), - "aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(), - "aws_waf_web_acl": resourceAwsWafWebAcl(), - "aws_waf_xss_match_set": resourceAwsWafXssMatchSet(), - "aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(), - "aws_waf_geo_match_set": resourceAwsWafGeoMatchSet(), - "aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(), - "aws_wafregional_geo_match_set": resourceAwsWafRegionalGeoMatchSet(), - "aws_wafregional_ipset": resourceAwsWafRegionalIPSet(), - "aws_wafregional_rate_based_rule": resourceAwsWafRegionalRateBasedRule(), - "aws_wafregional_regex_match_set": resourceAwsWafRegionalRegexMatchSet(), - "aws_wafregional_regex_pattern_set": resourceAwsWafRegionalRegexPatternSet(), - "aws_wafregional_rule": resourceAwsWafRegionalRule(), - "aws_wafregional_rule_group": resourceAwsWafRegionalRuleGroup(), - "aws_wafregional_size_constraint_set": resourceAwsWafRegionalSizeConstraintSet(), - "aws_wafregional_sql_injection_match_set": resourceAwsWafRegionalSqlInjectionMatchSet(), - "aws_wafregional_xss_match_set": resourceAwsWafRegionalXssMatchSet(), - "aws_wafregional_web_acl": resourceAwsWafRegionalWebAcl(), - "aws_wafregional_web_acl_association": resourceAwsWafRegionalWebAclAssociation(), - "aws_wafv2_ip_set": resourceAwsWafv2IPSet(), - "aws_wafv2_regex_pattern_set": resourceAwsWafv2RegexPatternSet(), - "aws_wafv2_rule_group": resourceAwsWafv2RuleGroup(), - "aws_wafv2_web_acl": resourceAwsWafv2WebACL(), - "aws_wafv2_web_acl_association": resourceAwsWafv2WebACLAssociation(), - "aws_wafv2_web_acl_logging_configuration": resourceAwsWafv2WebACLLoggingConfiguration(), - "aws_worklink_fleet": resourceAwsWorkLinkFleet(), - 
"aws_worklink_website_certificate_authority_association": resourceAwsWorkLinkWebsiteCertificateAuthorityAssociation(), - "aws_workspaces_directory": resourceAwsWorkspacesDirectory(), - "aws_workspaces_workspace": resourceAwsWorkspacesWorkspace(), - "aws_batch_compute_environment": resourceAwsBatchComputeEnvironment(), - "aws_batch_job_definition": resourceAwsBatchJobDefinition(), - "aws_batch_job_queue": resourceAwsBatchJobQueue(), - "aws_pinpoint_app": resourceAwsPinpointApp(), - "aws_pinpoint_adm_channel": resourceAwsPinpointADMChannel(), - "aws_pinpoint_apns_channel": resourceAwsPinpointAPNSChannel(), - "aws_pinpoint_apns_sandbox_channel": resourceAwsPinpointAPNSSandboxChannel(), - "aws_pinpoint_apns_voip_channel": resourceAwsPinpointAPNSVoipChannel(), - "aws_pinpoint_apns_voip_sandbox_channel": resourceAwsPinpointAPNSVoipSandboxChannel(), - "aws_pinpoint_baidu_channel": resourceAwsPinpointBaiduChannel(), - "aws_pinpoint_email_channel": resourceAwsPinpointEmailChannel(), - "aws_pinpoint_event_stream": resourceAwsPinpointEventStream(), - "aws_pinpoint_gcm_channel": resourceAwsPinpointGCMChannel(), - "aws_pinpoint_sms_channel": resourceAwsPinpointSMSChannel(), - "aws_xray_encryption_config": resourceAwsXrayEncryptionConfig(), - "aws_xray_group": resourceAwsXrayGroup(), - "aws_xray_sampling_rule": resourceAwsXraySamplingRule(), - "aws_workspaces_ip_group": resourceAwsWorkspacesIpGroup(), - - // ALBs are actually LBs because they can be type `network` or `application` - // To avoid regressions, we will add a new resource for each and they both point - // back to the old ALB version. IF the Terraform supported aliases for resources - // this would be a whole lot simpler - "aws_alb": resourceAwsLb(), - "aws_lb": resourceAwsLb(), - "aws_alb_listener": resourceAwsLbListener(), - "aws_lb_listener": resourceAwsLbListener(), - "aws_alb_listener_certificate": resourceAwsLbListenerCertificate(), - "aws_lb_listener_certificate": resourceAwsLbListenerCertificate(), - "aws_alb_listener_rule": resourceAwsLbbListenerRule(), - "aws_lb_listener_rule": resourceAwsLbbListenerRule(), - "aws_alb_target_group": resourceAwsLbTargetGroup(), - "aws_lb_target_group": resourceAwsLbTargetGroup(), - "aws_alb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), - "aws_lb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), - }, - } - - // Avoid Go formatting churn and Git conflicts - // You probably should not do this - provider.DataSourcesMap["aws_serverlessapplicationrepository_application"] = dataSourceAwsServerlessApplicationRepositoryApplication() - provider.ResourcesMap["aws_serverlessapplicationrepository_cloudformation_stack"] = resourceAwsServerlessApplicationRepositoryCloudFormationStack() - - provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { - terraformVersion := provider.TerraformVersion - if terraformVersion == "" { - // Terraform 0.12 introduced this field to the protocol - // We can therefore assume that if it's missing it's 0.10 or 0.11 - terraformVersion = "0.11+compatible" - } - return providerConfigure(d, terraformVersion) - } - - return provider -} - -var descriptions map[string]string -var endpointServiceNames []string - -func init() { - descriptions = map[string]string{ - "region": "The region where AWS operations will take place. Examples\n" + - "are us-east-1, us-west-2, etc.", // lintignore:AWSAT003 - - "access_key": "The access key for API operations. 
You can retrieve this\n" + - "from the 'Security & Credentials' section of the AWS console.", - - "secret_key": "The secret key for API operations. You can retrieve this\n" + - "from the 'Security & Credentials' section of the AWS console.", - - "profile": "The profile for API operations. If not set, the default profile\n" + - "created with `aws configure` will be used.", - - "shared_credentials_file": "The path to the shared credentials file. If not set\n" + - "this defaults to ~/.aws/credentials.", - - "token": "session token. A session token is only required if you are\n" + - "using temporary security credentials.", - - "max_retries": "The maximum number of times an AWS API request is\n" + - "being executed. If the API request still fails, an error is\n" + - "thrown.", - - "endpoint": "Use this to override the default service endpoint URL", - - "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + - "default value is `false`", - - "skip_credentials_validation": "Skip the credentials validation via STS API. " + - "Used for AWS API implementations that do not have STS available/implemented.", - - "skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " + - "Used by users that don't have ec2:DescribeAccountAttributes permissions.", - - "skip_region_validation": "Skip static validation of region name. " + - "Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).", - - "skip_requesting_account_id": "Skip requesting the account ID. " + - "Used for AWS API implementations that do not have IAM/STS API and/or metadata API.", - - "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + - "Used for AWS API implementations that do not have a metadata api endpoint.", - - "s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" + - "i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" + - "use virtual hosted bucket addressing when possible\n" + - "(http://BUCKET.s3.amazonaws.com/KEY). 
Specific to the Amazon S3 service.", - } - - endpointServiceNames = []string{ - "accessanalyzer", - "acm", - "acmpca", - "amplify", - "apigateway", - "appconfig", - "applicationautoscaling", - "applicationinsights", - "appmesh", - "apprunner", - "appstream", - "appsync", - "athena", - "auditmanager", - "autoscaling", - "autoscalingplans", - "backup", - "batch", - "budgets", - "chime", - "cloud9", - "cloudformation", - "cloudfront", - "cloudhsm", - "cloudsearch", - "cloudtrail", - "cloudwatch", - "cloudwatchevents", - "cloudwatchlogs", - "codeartifact", - "codebuild", - "codecommit", - "codedeploy", - "codepipeline", - "codestarconnections", - "cognitoidentity", - "cognitoidp", - "configservice", - "connect", - "cur", - "dataexchange", - "datapipeline", - "datasync", - "dax", - "detective", - "devicefarm", - "directconnect", - "dlm", - "dms", - "docdb", - "ds", - "dynamodb", - "ec2", - "ecr", - "ecrpublic", - "ecs", - "efs", - "eks", - "elasticache", - "elasticbeanstalk", - "elastictranscoder", - "elb", - "emr", - "emrcontainers", - "es", - "firehose", - "fms", - "forecast", - "fsx", - "gamelift", - "glacier", - "globalaccelerator", - "glue", - "greengrass", - "guardduty", - "iam", - "identitystore", - "imagebuilder", - "inspector", - "iot", - "iotanalytics", - "iotevents", - "kafka", - "kinesis", - "kinesisanalytics", - "kinesisanalyticsv2", - "kinesisvideo", - "kms", - "lakeformation", - "lambda", - "lexmodels", - "licensemanager", - "lightsail", - "location", - "macie", - "macie2", - "managedblockchain", - "marketplacecatalog", - "mediaconnect", - "mediaconvert", - "medialive", - "mediapackage", - "mediastore", - "mediastoredata", - "mq", - "mwaa", - "neptune", - "networkfirewall", - "networkmanager", - "opsworks", - "organizations", - "outposts", - "personalize", - "pinpoint", - "pricing", - "qldb", - "quicksight", - "ram", - "rds", - "redshift", - "resourcegroups", - "resourcegroupstaggingapi", - "route53", - "route53domains", - "route53resolver", - "s3", - "s3control", - "s3outposts", - "sagemaker", - "schemas", - "sdb", - "secretsmanager", - "securityhub", - "serverlessrepo", - "servicecatalog", - "servicediscovery", - "servicequotas", - "ses", - "shield", - "signer", - "sns", - "sqs", - "ssm", - "ssoadmin", - "stepfunctions", - "storagegateway", - "sts", - "swf", - "synthetics", - "timestreamwrite", - "transfer", - "waf", - "wafregional", - "wafv2", - "worklink", - "workmail", - "workspaces", - "xray", - } -} - -func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) { - config := Config{ - AccessKey: d.Get("access_key").(string), - SecretKey: d.Get("secret_key").(string), - Profile: d.Get("profile").(string), - Token: d.Get("token").(string), - Region: d.Get("region").(string), - CredsFilename: d.Get("shared_credentials_file").(string), - DefaultTagsConfig: expandProviderDefaultTags(d.Get("default_tags").([]interface{})), - Endpoints: make(map[string]string), - MaxRetries: d.Get("max_retries").(int), - IgnoreTagsConfig: expandProviderIgnoreTags(d.Get("ignore_tags").([]interface{})), - Insecure: d.Get("insecure").(bool), - SkipCredsValidation: d.Get("skip_credentials_validation").(bool), - SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), - SkipRegionValidation: d.Get("skip_region_validation").(bool), - SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), - SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), - S3ForcePathStyle: d.Get("s3_force_path_style").(bool), - terraformVersion: terraformVersion, - } - - 
if l, ok := d.Get("assume_role").([]interface{}); ok && len(l) > 0 && l[0] != nil { - m := l[0].(map[string]interface{}) - - if v, ok := m["duration_seconds"].(int); ok && v != 0 { - config.AssumeRoleDurationSeconds = v - } - - if v, ok := m["external_id"].(string); ok && v != "" { - config.AssumeRoleExternalID = v - } - - if v, ok := m["policy"].(string); ok && v != "" { - config.AssumeRolePolicy = v - } - - if policyARNSet, ok := m["policy_arns"].(*schema.Set); ok && policyARNSet.Len() > 0 { - for _, policyARNRaw := range policyARNSet.List() { - policyARN, ok := policyARNRaw.(string) - - if !ok { - continue - } - - config.AssumeRolePolicyARNs = append(config.AssumeRolePolicyARNs, policyARN) - } - } - - if v, ok := m["role_arn"].(string); ok && v != "" { - config.AssumeRoleARN = v - } - - if v, ok := m["session_name"].(string); ok && v != "" { - config.AssumeRoleSessionName = v - } - - if tagMapRaw, ok := m["tags"].(map[string]interface{}); ok && len(tagMapRaw) > 0 { - config.AssumeRoleTags = make(map[string]string) - - for k, vRaw := range tagMapRaw { - v, ok := vRaw.(string) - - if !ok { - continue - } - - config.AssumeRoleTags[k] = v - } - } - - if transitiveTagKeySet, ok := m["transitive_tag_keys"].(*schema.Set); ok && transitiveTagKeySet.Len() > 0 { - for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() { - transitiveTagKey, ok := transitiveTagKeyRaw.(string) - - if !ok { - continue - } - - config.AssumeRoleTransitiveTagKeys = append(config.AssumeRoleTransitiveTagKeys, transitiveTagKey) - } - } - - log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q)", config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID) - } - - endpointsSet := d.Get("endpoints").(*schema.Set) - - for _, endpointsSetI := range endpointsSet.List() { - endpoints := endpointsSetI.(map[string]interface{}) - for _, endpointServiceName := range endpointServiceNames { - config.Endpoints[endpointServiceName] = endpoints[endpointServiceName].(string) - } - } - - if v, ok := d.GetOk("allowed_account_ids"); ok { - for _, accountIDRaw := range v.(*schema.Set).List() { - config.AllowedAccountIds = append(config.AllowedAccountIds, accountIDRaw.(string)) - } - } - - if v, ok := d.GetOk("forbidden_account_ids"); ok { - for _, accountIDRaw := range v.(*schema.Set).List() { - config.ForbiddenAccountIds = append(config.ForbiddenAccountIds, accountIDRaw.(string)) - } - } - - return config.Client() -} - -// This is a global MutexKV for use within this plugin. 
-var awsMutexKV = mutexkv.NewMutexKV() - -func assumeRoleSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: "Seconds to restrict the assume role session duration.", - }, - "external_id": { - Type: schema.TypeString, - Optional: true, - Description: "Unique identifier that might be required for assuming a role in another account.", - }, - "policy": { - Type: schema.TypeString, - Optional: true, - Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", - ValidateFunc: validation.StringIsJSON, - }, - "policy_arns": { - Type: schema.TypeSet, - Optional: true, - Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, - }, - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "Amazon Resource Name of an IAM Role to assume prior to making API calls.", - ValidateFunc: validateArn, - }, - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: "Identifier for the assumed role session.", - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Description: "Assume role session tags.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "transitive_tag_keys": { - Type: schema.TypeSet, - Optional: true, - Description: "Assume role session tag keys to pass to any subsequent sessions.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - } -} - -func endpointsSchema() *schema.Schema { - endpointsAttributes := make(map[string]*schema.Schema) - - for _, endpointServiceName := range endpointServiceNames { - endpointsAttributes[endpointServiceName] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["endpoint"], - } - } - - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: endpointsAttributes, - }, - } -} - -func expandProviderDefaultTags(l []interface{}) *keyvaluetags.DefaultConfig { - if len(l) == 0 || l[0] == nil { - return nil - } - - defaultConfig := &keyvaluetags.DefaultConfig{} - m := l[0].(map[string]interface{}) - - if v, ok := m["tags"].(map[string]interface{}); ok { - defaultConfig.Tags = keyvaluetags.New(v) - } - return defaultConfig -} - -func expandProviderIgnoreTags(l []interface{}) *keyvaluetags.IgnoreConfig { - if len(l) == 0 || l[0] == nil { - return nil - } - - ignoreConfig := &keyvaluetags.IgnoreConfig{} - m := l[0].(map[string]interface{}) - - if v, ok := m["keys"].(*schema.Set); ok { - ignoreConfig.Keys = keyvaluetags.New(v.List()) - } - - if v, ok := m["key_prefixes"].(*schema.Set); ok { - ignoreConfig.KeyPrefixes = keyvaluetags.New(v.List()) - } - - return ignoreConfig -} - -// ReverseDns switches a DNS hostname to reverse DNS and vice-versa. 
-func ReverseDns(hostname string) string { - parts := strings.Split(hostname, ".") - - for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { - parts[i], parts[j] = parts[j], parts[i] - } - - return strings.Join(parts, ".") -} diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 4c693709341..792c5e4dc3b 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -13,9 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" + "github.com/terraform-providers/terraform-provider-aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/internal/service/s3/waiter" + "github.com/terraform-providers/terraform-provider-aws/internal/tfresource" ) func ResourceBucketReplicationConfiguration() *schema.Resource { From a740404b901a7951e1d8e06fc7ccbda7dceb5992 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 3 Nov 2021 14:39:30 -0700 Subject: [PATCH 75/80] tracking changes from upstream --- .../s3/bucket_replication_configuration.go | 41 +-- .../bucket_replication_configuration_test.go | 245 +++++++++--------- 2 files changed, 147 insertions(+), 139 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 792c5e4dc3b..d41c3dc37c5 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -13,9 +13,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/internal/keyvaluetags" - "github.com/terraform-providers/terraform-provider-aws/internal/service/s3/waiter" - "github.com/terraform-providers/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) func ResourceBucketReplicationConfiguration() *schema.Resource { @@ -61,12 +62,12 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "account_id": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateAwsAccountId, + ValidateFunc: verify.ValidAccountID, }, "bucket": { Type: schema.TypeString, Required: true, - ValidateFunc: validateArn, + ValidateFunc: verify.ValidARN, }, "storage_class": { Type: schema.TypeString, @@ -221,7 +222,7 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Optional: true, ValidateFunc: validation.StringLenBetween(0, 1024), }, - "tags": tagsSchema(), + "tags": tftags.TagsSchema(), }, }, }, @@ -293,10 +294,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met input.Bucket = bucket } - s3conn := 
meta.(*AWSClient).s3conn + conn := meta.(*conns.AWSClient).S3Conn - err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { - _, err := s3conn.HeadBucket(input) + err := resource.Retry(bucketCreatedTimeout, func() *resource.RetryError { + _, err := conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { return resource.RetryableError(err) @@ -314,7 +315,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met }) if tfresource.TimedOut(err) { - _, err = s3conn.HeadBucket(input) + _, err = conn.HeadBucket(input) } if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -332,12 +333,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } // Read the bucket replication configuration - replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + replicationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketReplication(&s3.GetBucketReplicationInput{ Bucket: bucket, }) }) - if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + if err != nil && !tfawserr.ErrMessageContains(err, "ReplicationConfigurationNotFoundError", "") { return fmt.Errorf("error getting S3 Bucket replication: %s", err) } replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) @@ -433,11 +434,11 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met m["prefix"] = aws.StringValue(f.Prefix) } if t := f.Tag; t != nil { - m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + m["tags"] = KeyValueTags([]*s3.Tag{t}).IgnoreAWS().Map() } if a := f.And; a != nil { m["prefix"] = aws.StringValue(a.Prefix) - m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + m["tags"] = KeyValueTags(a.Tags).IgnoreAWS().Map() } t["filter"] = []interface{}{m} @@ -456,7 +457,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn + s3conn := meta.(*conns.AWSClient).S3Conn bucket := d.Get("bucket").(string) rc := &s3.ReplicationConfiguration{} @@ -575,7 +576,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) rcRule.Filter = &s3.ReplicationRuleFilter{} filter := f[0].(map[string]interface{}) - tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) if len(tags) > 0 { rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ Prefix: aws.String(filter["prefix"].(string)), @@ -609,7 +610,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := s3conn.PutBucketReplication(i) - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } if err != nil { @@ -617,7 +618,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d 
*schema.ResourceData, m } return nil }) - if isResourceTimeoutError(err) { + if tfresource.TimedOut(err) { _, err = s3conn.PutBucketReplication(i) } if err != nil { @@ -628,7 +629,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn + s3conn := meta.(*conns.AWSClient).S3Conn bucket := d.Get("bucket").(string) log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 28bc9f8e502..5947bdc27cc 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1,4 +1,4 @@ -package s3 +package s3_test import ( "fmt" @@ -9,15 +9,18 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -26,19 +29,19 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -59,7 +62,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -80,7 +83,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), 
resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -109,7 +112,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -117,12 +120,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), @@ -169,7 +172,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test } func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -177,12 +180,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), @@ -233,7 +236,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -241,12 +244,12 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + 
ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), @@ -285,8 +288,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -295,19 +298,19 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -339,7 +342,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -373,8 +376,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -383,19 +386,19 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: 
testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -424,7 +427,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -449,8 +452,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo } func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -459,19 +462,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigRTC(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -509,8 +512,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -519,19 +522,19 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), Check: 
resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -563,7 +566,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -571,12 +574,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -594,8 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -604,19 +607,19 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -643,7 +646,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -677,7 +680,7 
@@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -712,7 +715,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -751,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -795,28 +798,31 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := acctest.RandInt() - rName := acctest.RandomWithPrefix("tf-acc-test") - rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix("tf-acc-test") + rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { ID: aws.String("testid"), Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), StorageClass: aws.String(s3.ObjectStorageClassStandard), }, Status: aws.String(s3.ReplicationRuleStatusEnabled), @@ -854,28 +860,31 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) return } resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := acctest.RandInt() - rName := acctest.RandomWithPrefix("tf-acc-test") - rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + rInt := sdkacctest.RandInt() + rName 
:= sdkacctest.RandomWithPrefix("tf-acc-test") + rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { ID: aws.String("testid"), Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), StorageClass: aws.String(s3.ObjectStorageClassStandard), }, Status: aws.String(s3.ReplicationRuleStatusEnabled), @@ -907,8 +916,8 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) } func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -927,19 +936,19 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -968,7 +977,7 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { }) } -func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { +func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] for _, rule := range rules { @@ -999,17 
+1008,15 @@ func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRu } } - conn := testAccProvider.Meta().(*AWSClient).s3conn + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ Bucket: aws.String(rs.Primary.ID), }) - if err != nil { - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { - return fmt.Errorf("S3 bucket not found") - } - if rules == nil { - return nil - } + if err != nil && tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } else if err != nil && rules == nil { + return nil + } else if err != nil { return fmt.Errorf("GetReplicationConfiguration error: %v", err) } @@ -1156,7 +1163,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { @@ -1230,7 +1237,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { @@ -1315,7 +1322,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { From 0ba4166c3c4254a3ad01d42c4caf9f35e9016dec Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sat, 6 Nov 2021 09:09:29 -0700 Subject: [PATCH 76/80] update tests to track V2 changes --- .../bucket_replication_configuration_test.go | 392 ++++-------------- 1 file changed, 90 insertions(+), 302 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 5947bdc27cc..5575cee8b94 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -6,6 +6,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -16,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -34,7 +36,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), @@ -125,7 +127,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: 
acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), @@ -185,12 +187,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", @@ -205,22 +207,10 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "priority": "2", "status": "Enabled", "filter.#": "1", - "filter.0.tags.%": "1", - "filter.0.tags.Key2": "Value2", + "filter.0.prefix": "prefix2", "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ - "id": "rule3", - "priority": "3", - "status": "Disabled", - "filter.#": "1", - "filter.0.prefix": "prefix3", - "filter.0.tags.%": "1", - "filter.0.tags.Key3": "Value3", - "destination.#": "1", - "destination.0.storage_class": "ONEZONE_IA", - }), ), }, { @@ -249,7 +239,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), @@ -269,8 +259,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "priority": "2", "status": "Enabled", "filter.#": "1", - "filter.0.tags.%": "1", - "filter.0.tags.Key2": "Value2", + "filter.0.prefix": "prefix1", "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), @@ -303,7 +292,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), @@ -391,7 +380,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: 
[]resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), @@ -467,7 +456,7 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigRTC(rInt), @@ -496,7 +485,7 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }, }, DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), }, Filter: &s3.ReplicationRuleFilter{ Prefix: aws.String("foo"), @@ -527,7 +516,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), @@ -544,7 +533,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), }, DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), }, Filter: &s3.ReplicationRuleFilter{ Prefix: aws.String("foo"), @@ -579,7 +568,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -612,35 +601,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Priority: aws.Int64(0), - 
DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( @@ -675,123 +637,6 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(""), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - }, - }, - }, - Priority: aws.Int64(42), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String("foo"), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - { - Key: aws.String("AnotherTag"), - Value: aws.String("OK"), - }, - }, - }, - }, - Priority: aws.Int64(41), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(""), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - { - Key: aws.String("AnotherTag"), - Value: aws.String("OK"), - }, - { - Key: aws.String("Foo"), - Value: aws.String("Bar"), - }, - }, - }, - }, - Priority: 
aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, }, }) } @@ -809,7 +654,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), @@ -871,7 +716,7 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), @@ -941,7 +786,7 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), @@ -1038,6 +883,39 @@ func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) r } } +func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_replication_configuration" { + continue + } + input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.GetBucketReplication(input) + + if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "NotFound", "") { + return nil + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("AWS S3 Bucket Replication Configuration still exists: %s", rs.Primary.ID)) + }) + + if tfresource.TimedOut(err) { + _, err = conn.GetBucketReplication(input) + } + + if err != nil { + return err + } + } + return nil +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -1119,6 +997,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn replication_time { @@ -1154,6 +1035,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } } + delete_marker_replication { + status = "Enabled" + } + status = "Enabled" destination { bucket = aws_s3_bucket.destination.arn @@ -1201,6 +1086,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + 
delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1214,6 +1103,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination2.arn storage_class = "STANDARD_IA" @@ -1227,6 +1120,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination3.arn storage_class = "ONEZONE_IA" @@ -1277,6 +1174,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "prefix1" } + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1289,9 +1190,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - tags = { - Key2 = "Value2" - } + prefix = "prefix2" + } + + delete_marker_replication { + status = "Enabled" } destination { @@ -1300,24 +1203,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } - rules { - id = "rule3" - priority = 3 - status = "Disabled" - - filter { - prefix = "prefix3" - - tags = { - Key3 = "Value3" - } - } - - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } - } }`, randInt)) } @@ -1350,6 +1235,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "prefix1" } + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1362,9 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - tags = { - Key2 = "Value2" - } + prefix = "prefix1" + } + + delete_marker_replication { + status = "Enabled" } destination { @@ -1512,28 +1403,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - filter { - prefix = "foo" - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1560,87 +1429,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - priority = 42 - - filter { - tags = { - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource 
"aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - priority = 41 - - filter { - prefix = "foo" - - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { From f7d3965de35c95fd6aff9b9bdeee244df1d6245b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 12 Nov 2021 15:02:16 -0500 Subject: [PATCH 77/80] run terrafmt on test file --- .../service/s3/bucket_replication_configuration_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 5575cee8b94..39dbc1e1697 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1190,7 +1190,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - prefix = "prefix2" + prefix = "prefix2" } delete_marker_replication { @@ -1251,9 +1251,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - prefix = "prefix1" + prefix = "prefix1" } - + delete_marker_replication { status = "Enabled" } From 37f2e23591004cd82e7566956b86a4c10f42d584 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 21:12:48 -0500 Subject: [PATCH 78/80] CR updates; align with service packages structure; flex and enum --- internal/service/s3/bucket.go | 4 +- .../s3/bucket_replication_configuration.go | 625 +++----- .../bucket_replication_configuration_test.go | 1307 +++++++++-------- internal/service/s3/bucket_test.go | 64 + internal/service/s3/enum.go | 2 + internal/service/s3/flex.go | 686 +++++++++ 6 files changed, 1648 insertions(+), 1040 deletions(-) create mode 100644 internal/service/s3/flex.go diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 5bc4db22b41..9ff7a34dc70 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -818,7 +818,7 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn, d); err != nil { + if err := resourceBucketInternalReplicationConfigurationUpdate(conn, d); err != nil { return err } } @@ -2033,7 +2033,7 @@ func resourceBucketObjectLockConfigurationUpdate(conn *s3.S3, d *schema.Resource return nil } -func resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { +func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) 
error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index d41c3dc37c5..ec27a3fdc4d 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -1,11 +1,8 @@ package s3 import ( - "errors" "fmt" "log" - "net/http" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -21,10 +18,10 @@ import ( func ResourceBucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3BucketReplicationConfigurationPut, - Read: resourceAwsS3BucketReplicationConfigurationRead, - Update: resourceAwsS3BucketReplicationConfigurationUpdate, - Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Create: resourceBucketReplicationConfigurationCreate, + Read: resourceBucketReplicationConfigurationRead, + Update: resourceBucketReplicationConfigurationUpdate, + Delete: resourceBucketReplicationConfigurationDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, @@ -32,34 +29,57 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { Type: schema.TypeString, - Optional: true, - Computed: true, + Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(0, 63), + ValidateFunc: validation.StringLenBetween(1, 63), }, "role": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, - "rules": { + "rule": { Type: schema.TypeSet, Required: true, - Set: rulesHash, + //Set: rulesHash, + MaxItems: 1000, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 255), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, "destination": { Type: schema.TypeList, MaxItems: 1, - MinItems: 1, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "account_id": { + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + "account": { Type: schema.TypeString, Optional: true, ValidateFunc: verify.ValidAccountID, @@ -69,26 +89,16 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), - }, - "replica_kms_key_id": { - Type: schema.TypeString, - Optional: true, - }, - "access_control_translation": { + "encryption_configuration": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "owner": { + "replica_kms_key_id": { Type: schema.TypeString, Required: true, - ValidateFunc: 
validation.StringInSlice(s3.OwnerOverride_Values(), false), + ValidateFunc: verify.ValidARN, }, }, }, @@ -96,38 +106,38 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "metrics": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), - }, "event_threshold": { Type: schema.TypeList, Required: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + // Currently, the S3 API only supports 15 minutes; + // however, to account for future changes, validation + // is left at positive integers. ValidateFunc: validation.IntAtLeast(0), }, }, }, }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), + }, }, }, }, "replication_time": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { @@ -138,13 +148,15 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "time": { Type: schema.TypeList, Required: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + // Currently, the S3 API only supports 15 minutes; + // however, to account for future changes, validation + // is left at positive integers. ValidateFunc: validation.IntAtLeast(0), }, }, @@ -153,42 +165,67 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, }, }, }, - "source_selection_criteria": { + "existing_object_replication": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "sse_kms_encrypted_objects": { + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), + }, + }, + }, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "and": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { + "prefix": { Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, + "tags": tftags.TagsSchema(), }, }, }, - "replica_modifications": { + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tag": { Type: schema.TypeList, - Optional: true, - MinItems: 1, MaxItems: 1, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, }, }, 
}, @@ -196,65 +233,61 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, }, - "prefix": { + "id": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), + ValidateFunc: validation.StringLenBetween(0, 255), }, - "status": { + "prefix": { Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, "priority": { Type: schema.TypeInt, Optional: true, }, - "filter": { + "source_selection_criteria": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), + "replica_modifications": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), + }, + }, + }, }, - "tags": tftags.TagsSchema(), - }, - }, - }, - "existing_object_replication": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), + }, + }, + }, }, }, }, }, - "delete_marker_replication": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), - }, - }, - }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), }, }, }, @@ -263,354 +296,100 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { } } -func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error { - // Get the bucket - var bucket string - if v, ok := d.GetOk("bucket"); ok { - bucket = v.(string) - } else { - log.Printf("[ERROR] S3 Bucket name not set") - return errors.New("[ERROR] S3 Bucket name not set") - } - d.SetId(bucket) - - return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) -} +func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn -func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + bucket := d.Get("bucket").(string) - if _, ok := d.GetOk("bucket"); !ok { - // during import operations, use the supplied ID for the bucket name - d.Set("bucket", d.Id()) + rc := &s3.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), } - var bucket *string - input := &s3.HeadBucketInput{} - if rsp, ok := d.GetOk("bucket"); !ok { - log.Printf("[ERROR] S3 Bucket name not set") 
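
Note: the diffstat for this patch adds internal/service/s3/flex.go (686 lines), which holds the ExpandRules/FlattenRules helpers called from the new Create and Update functions but is not reproduced in this hunk. As a rough illustration only — the function name and field coverage below are assumptions, not the actual flex.go contents — converting a single `rule` map from the schema above into the SDK type would look roughly like this:

// Hypothetical sketch only: the real ExpandRules lives in flex.go (not shown
// in this hunk) and handles every nested block; this version covers just a
// few fields to illustrate the map[string]interface{} -> SDK type conversion.
package s3

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func expandRuleSketch(tfMap map[string]interface{}) *s3.ReplicationRule {
	rule := &s3.ReplicationRule{
		// "status" is Required in the schema, so a direct assertion is safe here.
		Status: aws.String(tfMap["status"].(string)),
	}

	if v, ok := tfMap["id"].(string); ok && v != "" {
		rule.ID = aws.String(v)
	}

	if v, ok := tfMap["priority"].(int); ok {
		rule.Priority = aws.Int64(int64(v))
	}

	// "destination" is a single-element TypeList in the schema above.
	if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		dest := v[0].(map[string]interface{})
		d := &s3.Destination{
			Bucket: aws.String(dest["bucket"].(string)),
		}
		if sc, ok := dest["storage_class"].(string); ok && sc != "" {
			d.StorageClass = aws.String(sc)
		}
		rule.Destination = d
	}

	return rule
}
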
- return errors.New("[ERROR] S3 Bucket name not set") - } else { - bucket = aws.String(rsp.(string)) - input.Bucket = bucket + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, } - conn := meta.(*conns.AWSClient).S3Conn - - err := resource.Retry(bucketCreatedTimeout, func() *resource.RetryError { - _, err := conn.HeadBucket(input) - - if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { + err := resource.Retry(propagationTimeout, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } - - if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - return resource.RetryableError(err) - } - if err != nil { return resource.NonRetryableError(err) } - return nil }) if tfresource.TimedOut(err) { - _, err = conn.HeadBucket(input) + _, err = conn.PutBucketReplication(input) } - if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - return nil + if err != nil { + return fmt.Errorf("error creating S3 replication configuration for bucket (%s): %w", bucket, err) } - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - return nil - } + d.SetId(bucket) - if err != nil { - return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) + return resourceBucketReplicationConfigurationRead(d, meta) +} + +func resourceBucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn + + input := &s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Id()), } // Read the bucket replication configuration - replicationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: bucket, - }) + output, err := retryWhenBucketNotFound(func() (interface{}, error) { + return conn.GetBucketReplication(input) }) - if err != nil && !tfawserr.ErrMessageContains(err, "ReplicationConfigurationNotFoundError", "") { - return fmt.Errorf("error getting S3 Bucket replication: %s", err) - } - replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) - if !ok || replication == nil { - return fmt.Errorf("error reading replication_configuration") - } - r := replication.ReplicationConfiguration - // set role - if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", r.Role) - } - rules := make([]interface{}, 0, len(r.Rules)) - for _, v := range r.Rules { - t := make(map[string]interface{}) - if v.Destination != nil { - rd := make(map[string]interface{}) - if v.Destination.Bucket != nil { - rd["bucket"] = aws.StringValue(v.Destination.Bucket) - } - if v.Destination.StorageClass != nil { - rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) - } - if v.Destination.EncryptionConfiguration != nil { - if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { - rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) - } - } - if v.Destination.Account != nil { - rd["account_id"] = aws.StringValue(v.Destination.Account) - } - if v.Destination.AccessControlTranslation != 
nil { - rdt := map[string]interface{}{ - "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), - } - rd["access_control_translation"] = []interface{}{rdt} - } - if v.Destination.ReplicationTime != nil { - drt := make(map[string]interface{}) - if v.Destination.ReplicationTime.Status != nil { - drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status) - drtm := make(map[string]interface{}) - drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes) - drt["time"] = []interface{}{drtm} - rd["replication_time"] = []interface{}{drt} - } - } - if v.Destination.Metrics != nil { - dm := make(map[string]interface{}) - if v.Destination.Metrics.Status != nil { - dm["status"] = aws.StringValue(v.Destination.Metrics.Status) - dmetm := make(map[string]interface{}) - dmetm["minutes"] = aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes) - dm["event_threshold"] = []interface{}{dmetm} - rd["metrics"] = []interface{}{dm} - } - } - t["destination"] = []interface{}{rd} - } + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket Replication Configuration (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } - if v.ExistingObjectReplication != nil { - status := make(map[string]interface{}) - status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = []interface{}{status} - } + if err != nil { + return fmt.Errorf("error getting S3 Bucket Replication Configuration for bucket (%s): %w", d.Id(), err) + } - if v.ID != nil { - t["id"] = aws.StringValue(v.ID) - } - if v.Prefix != nil { - t["prefix"] = aws.StringValue(v.Prefix) - } - if v.Status != nil { - t["status"] = aws.StringValue(v.Status) - } - if vssc := v.SourceSelectionCriteria; vssc != nil { - tssc := make(map[string]interface{}) - if vssc.SseKmsEncryptedObjects != nil { - tSseKms := make(map[string]interface{}) - tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) - tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} - } - t["source_selection_criteria"] = []interface{}{tssc} - } + replication, ok := output.(*s3.GetBucketReplicationOutput) - if v.Priority != nil { - t["priority"] = int(aws.Int64Value(v.Priority)) - } + if !ok || replication == nil || replication.ReplicationConfiguration == nil { + return fmt.Errorf("error reading S3 Bucket Replication Configuration for bucket (%s): empty output", d.Id()) + } - if f := v.Filter; f != nil { - m := map[string]interface{}{} - if f.Prefix != nil { - m["prefix"] = aws.StringValue(f.Prefix) - } - if t := f.Tag; t != nil { - m["tags"] = KeyValueTags([]*s3.Tag{t}).IgnoreAWS().Map() - } - if a := f.And; a != nil { - m["prefix"] = aws.StringValue(a.Prefix) - m["tags"] = KeyValueTags(a.Tags).IgnoreAWS().Map() - } - t["filter"] = []interface{}{m} - - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { - status := make(map[string]interface{}) - status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) - t["delete_marker_replication"] = []interface{}{status} - } - } + r := replication.ReplicationConfiguration - rules = append(rules, t) + d.Set("bucket", d.Id()) + d.Set("role", r.Role) + if err := d.Set("rule", schema.NewSet(rulesHash, FlattenRules(r.Rules))); err != nil { + return fmt.Errorf("error setting rule: %w", err) } - d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } -func 
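
The new Read path above relies on retryWhenBucketNotFound, which is defined elsewhere in the package and not shown in this diff. A minimal sketch of what such a wrapper plausibly does, assuming it simply retries the supplied call while S3 still reports NoSuchBucket; the one-minute timeout is an assumption borrowed from the pre-refactor inline retry:

// Hypothetical sketch: not the actual helper, just the retry shape implied
// by how it is called in resourceBucketReplicationConfigurationRead.
package s3

import (
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

// propagationTimeout is defined elsewhere in the package; one minute is an
// assumption for this sketch (the old inline retry used one minute).
const propagationTimeoutSketch = 1 * time.Minute

func retryWhenBucketNotFoundSketch(f func() (interface{}, error)) (interface{}, error) {
	var output interface{}

	err := resource.Retry(propagationTimeoutSketch, func() *resource.RetryError {
		var err error
		output, err = f()

		// Retry while the bucket itself has not propagated yet.
		if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) {
			return resource.RetryableError(err)
		}
		if err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})

	// One final attempt if the retry loop timed out.
	if tfresource.TimedOut(err) {
		output, err = f()
	}

	return output, err
}
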
resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*conns.AWSClient).S3Conn - bucket := d.Get("bucket").(string) - - rc := &s3.ReplicationConfiguration{} - if val, ok := d.GetOk("role"); ok { - rc.Role = aws.String(val.(string)) - } - - rcRules := d.Get("rules").(*schema.Set).List() - rules := []*s3.ReplicationRule{} - for _, v := range rcRules { - rr := v.(map[string]interface{}) - rcRule := &s3.ReplicationRule{} - if status, ok := rr["status"]; ok && status != "" { - rcRule.Status = aws.String(status.(string)) - } else { - continue - } - - if rrid, ok := rr["id"]; ok && rrid != "" { - rcRule.ID = aws.String(rrid.(string)) - } - - eor := rr["existing_object_replication"].([]interface{}) - if len(eor) > 0 { - s := eor[0].(map[string]interface{}) - rcRule.ExistingObjectReplication = &s3.ExistingObjectReplication{ - Status: aws.String(s["status"].(string)), - } - } - - ruleDestination := &s3.Destination{} - if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { - if dest[0] != nil { - bd := dest[0].(map[string]interface{}) - ruleDestination.Bucket = aws.String(bd["bucket"].(string)) - - if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { - ruleDestination.StorageClass = aws.String(storageClass.(string)) - } - - if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { - ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), - } - } - - if account, ok := bd["account_id"]; ok && account != "" { - ruleDestination.Account = aws.String(account.(string)) - } - - if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { - aclTranslationValues := aclTranslation[0].(map[string]interface{}) - ruleAclTranslation := &s3.AccessControlTranslation{} - ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) - ruleDestination.AccessControlTranslation = ruleAclTranslation - } - - rt, ok := bd["replication_time"].([]interface{}) - if ok && len(rt) > 0 { - s := rt[0].(map[string]interface{}) - if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { - m := t[0].(map[string]interface{}) - ruleDestination.ReplicationTime = &s3.ReplicationTime{ - Status: aws.String(s["status"].(string)), - Time: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(int64(m["minutes"].(int))), - }, - } - } - } - - rm, ok := bd["metrics"].([]interface{}) - if ok && len(rm) > 0 { - s := rm[0].(map[string]interface{}) - if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 { - m := et[0].(map[string]interface{}) - ruleDestination.Metrics = &s3.Metrics{ - Status: aws.String(s["status"].(string)), - EventThreshold: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(int64(m["minutes"].(int))), - }, - } - } - } - - } - } - rcRule.Destination = ruleDestination - - if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { - if ssc[0] != nil { - sscValues := ssc[0].(map[string]interface{}) - ruleSsc := &s3.SourceSelectionCriteria{} - if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { - if sseKms[0] != nil { - sseKmsValues := sseKms[0].(map[string]interface{}) - sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) - ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects - } - } - if sscRm, ok := 
sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { - if sscRm[0] != nil { - replicaModValues := sscRm[0].(map[string]interface{}) - replicaModifications := &s3.ReplicaModifications{} - replicaModifications.Status = aws.String(replicaModValues["status"].(string)) - ruleSsc.ReplicaModifications = replicaModifications - } - } - rcRule.SourceSelectionCriteria = ruleSsc - } - } - - if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { - // XML schema V2. - rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) - rcRule.Filter = &s3.ReplicationRuleFilter{} - filter := f[0].(map[string]interface{}) - tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) - if len(tags) > 0 { - rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(filter["prefix"].(string)), - Tags: tags, - } - } else { - rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) - } - - dmr, ok := rr["delete_marker_replication"].([]interface{}) - if ok && len(dmr) > 0 { - s := dmr[0].(map[string]interface{}) - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s["status"].(string)), - } - } - } else { - // XML schema V1. - rcRule.Prefix = aws.String(rr["prefix"].(string)) - } +func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn - rules = append(rules, rcRule) + rc := &s3.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), } - rc.Rules = rules - i := &s3.PutBucketReplicationInput{ - Bucket: aws.String(bucket), + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(d.Id()), ReplicationConfiguration: rc, } - log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := s3conn.PutBucketReplication(i) - if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + err := resource.Retry(propagationTimeout, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } if err != nil { @@ -618,29 +397,33 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } return nil }) + if tfresource.TimedOut(err) { - _, err = s3conn.PutBucketReplication(i) + _, err = conn.PutBucketReplication(input) } + if err != nil { - return fmt.Errorf("Error putting S3 replication configuration: %s", err) + return fmt.Errorf("error updating S3 replication configuration for bucket (%s): %w", d.Id(), err) } - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) + return resourceBucketReplicationConfigurationRead(d, meta) } -func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*conns.AWSClient).S3Conn - bucket := d.Get("bucket").(string) +func resourceBucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn - log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + input := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketReplication(input) - dbri := 
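
The new Read above (and the Delete below) also check ErrCodeReplicationConfigurationNotFound, which the diffstat attributes to a two-line change in internal/service/s3/enum.go not included in this hunk. Since the removed Read code matched on the literal "ReplicationConfigurationNotFoundError", the constant presumably looks like this (a guess at enum.go, not its verbatim contents):

package s3

const (
	// Error code returned by GetBucketReplication when a bucket has no
	// replication configuration; the AWS SDK does not export a constant for it.
	ErrCodeReplicationConfigurationNotFound = "ReplicationConfigurationNotFoundError"
)
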
&s3.DeleteBucketReplicationInput{ - Bucket: aws.String(bucket), + if tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + return nil } - _, err := s3conn.DeleteBucketReplication(dbri) if err != nil { - return fmt.Errorf("Error removing S3 bucket replication: %s", err) + return fmt.Errorf("error deleting S3 bucket replication configuration for bucket (%s): %w", d.Id(), err) } return nil diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 39dbc1e1697..13c8819b518 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -2,11 +2,7 @@ package s3_test import ( "fmt" - "reflect" - "sort" - "strings" "testing" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -17,16 +13,17 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { + iamRoleResourceName := "aws_iam_role.test" + dstBucketResourceName := "aws_s3_bucket.destination" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ @@ -39,83 +36,91 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassStandard), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, 
"arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig(rInt, "GLACIER"), + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassGlacier), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassGlacier), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassGlacier, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Config: testAccBucketReplicationConfigurationWithSseKmsEncryptedObjects(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - EncryptionConfiguration: &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ - Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.encryption_configuration.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + "source_selection_criteria.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.encryption_configuration.0.replica_kms_key_id", kmsKeyResourceName, "arn"), + ), + }, + }, + }) +} + +func TestAccS3BucketReplicationConfiguration_disappears(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: 
func() { + acctest.PreCheck(t) + }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassStandard), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucketReplicationConfiguration(), resourceName), ), + ExpectNonEmptyPlan: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_multipleDestinationsEmptyFilter(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -130,52 +135,51 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsEmptyFilter(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule3", "priority": "3", - "status": "Disabled", + "status": s3.ReplicationRuleStatusDisabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "ONEZONE_IA", + "destination.0.storage_class": s3.StorageClassOnezoneIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := 
"aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -190,44 +194,43 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsNonEmptyFilter(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix2", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { +func TestAccS3BucketReplicationConfiguration_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -242,45 +245,46 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsTwoDestination(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + 
testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + callerIdentityDataSourceName := "data.aws_caller_identity.current" + iamRoleResourceName := "aws_iam_role.test" + dstBucketResourceName := "aws_s3_bucket.destination" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -295,80 +299,69 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": 
"foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithSseKmsEncryptedObjectsAndAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - EncryptionConfiguration: &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), - }, - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ - Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.encryption_configuration.#": "1", + "source_selection_criteria.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.encryption_configuration.0.replica_kms_key_id", kmsKeyResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 -func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + callerIdentityDataSourceName := "data.aws_caller_identity.current" + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -383,68 +376,60 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Config: testAccBucketReplicationConfigurationRulesDestination(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", 
partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_replicationTimeControl(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -459,52 +444,45 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Config: testAccBucketReplicationConfigurationRTC(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Priority: aws.Int64(0), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - ReplicationTime: &s3.ReplicationTime{ - Status: aws.String(s3.ReplicationTimeStatusEnabled), - Time: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(15), - }, - }, - Metrics: &s3.Metrics{ - Status: aws.String(s3.MetricsStatusEnabled), - EventThreshold: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(15), - }, - }, - }, - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "delete_marker_replication.#": "1", + 
"delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "destination.#": "1", + "destination.0.replication_time.#": "1", + "destination.0.replication_time.0.status": s3.ReplicationTimeStatusEnabled, + "destination.0.replication_time.0.time.#": "1", + "destination.0.replication_time.0.time.0.minutes": "15", + "destination.0.metrics.#": "1", + //"destination.0.metrics.0.status": s3.MetricsStatusEnabled, + //"destination.0.metrics.0.event_threshold.#": "1", + //"destination.0.metrics.0.event_threshold.0.minutes": "15", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -519,44 +497,41 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Config: testAccBucketReplicationConfigurationReplicaMods(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Priority: aws.Int64(0), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - }, - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - ReplicaModifications: &s3.ReplicaModifications{ - Status: aws.String(s3.ReplicaModificationsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "source_selection_criteria.#": "1", + "source_selection_criteria.0.replica_modifications.#": "1", + "source_selection_criteria.0.replica_modifications.0.status": s3.ReplicaModificationsStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 -func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -571,25 +546,34 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc(), + Config: testAccBucketReplicationConfigurationWithoutStorageClass(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), }, { - Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_schemaV2(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -604,48 +588,39 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Config: testAccBucketReplicationConfigurationWithV2ConfigurationNoTags(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: 
aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := sdkacctest.RandInt() +func TestAccS3BucketReplicationConfiguration_schemaV2SameRegion(t *testing.T) { rName := sdkacctest.RandomWithPrefix("tf-acc-test") rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -657,57 +632,41 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), + Config: testAccBucketReplicationConfiguration_schemaV2SameRegion(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( - acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("testid"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("testprefix"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - }, - }, - ), + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "testid", + 
"filter.#": "1", + "filter.0.prefix": "testprefix", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_destroy", "acl"}, }, }, }) } -const isExistingObjectReplicationBlocked = true +func TestAccS3BucketReplicationConfiguration_existingObjectReplication(t *testing.T) { + t.Skipf("skipping test: AWS Technical Support request required to allow ExistingObjectReplication") -func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { - if isExistingObjectReplicationBlocked { - /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication - A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. - Once that request is approved, this can be unblocked for testing. */ - return - } - resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := sdkacctest.RandInt() - rName := sdkacctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -719,94 +678,114 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + Config: testAccBucketReplicationConfiguration_existingObjectReplication(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( - acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("testid"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("testprefix"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - ExistingObjectReplication: &s3.ExistingObjectReplication{ - Status: aws.String(s3.ExistingObjectReplicationStatusEnabled), - }, - }, - }, - ), + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "testid", + "filter.#": "1", + "filter.0.prefix": "testprefix", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "existing_object_replication.#": "1", + "existing_object_replication.0.status": s3.ExistingObjectReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_destroy", "acl"}, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - testDeleted := func(r string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[r] - if ok { - return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) - } - return nil - } - } +func TestAccS3BucketReplicationConfiguration_filter_tagFilter(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckMultipleRegion(t, 2) + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfiguration_filter_tag(rName, "testkey", "testvalue"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.tag.#": "1", + "filter.0.tag.0.key": "testkey", + "filter.0.tag.0.value": "testvalue", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, + }) +} + +func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := 
"aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Config: testAccBucketReplicationConfiguration_filter_andOperator_prefixAndTags(rName, "testkey1", "testvalue1", "testkey2", "testvalue2"), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.and.#": "1", + "filter.0.and.0.prefix": "foo", + "filter.0.and.0.tags.%": "2", + "filter.0.and.0.tags.testkey1": "testvalue1", + "filter.0.and.0.tags.testkey2": "testvalue2", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { @@ -815,113 +794,99 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigBasic(rInt), - Check: resource.ComposeTestCheckFunc(testDeleted(resourceName)), + Config: testAccBucketReplicationConfiguration_filter_andOperator_tags(rName, "testkey1", "testvalue1", "testkey2", "testvalue2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.and.#": "1", + "filter.0.and.0.tags.%": "2", + "filter.0.and.0.tags.testkey1": "testvalue1", + "filter.0.and.0.tags.testkey2": "testvalue2", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs := s.RootModule().Resources[n] - for _, rule := 
range rules { - if dest := rule.Destination; dest != nil { - if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { - resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) - resourceReference = strings.Replace(resourceReference, "}", "", 1) - resourceReferenceParts := strings.Split(resourceReference, ".") - resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] - resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") - value := s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] - dest.Account = aws.String(value) - } - if ec := dest.EncryptionConfiguration; ec != nil { - if ec.ReplicaKmsKeyID != nil { - key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] - ec.ReplicaKmsKeyID = aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) - } - } - } - // Sort filter tags by key. - if filter := rule.Filter; filter != nil { - if and := filter.And; and != nil { - if tags := and.Tags; tags != nil { - sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) - } - } - } +func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_replication_configuration" { + continue } + input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn - out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(rs.Primary.ID), + output, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketReplication(input) }) - if err != nil && tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") { - return fmt.Errorf("S3 bucket not found") - } else if err != nil && rules == nil { - return nil - } else if err != nil { - return fmt.Errorf("GetReplicationConfiguration error: %v", err) - } - for _, rule := range out.ReplicationConfiguration.Rules { - // Sort filter tags by key. 
- if filter := rule.Filter; filter != nil { - if and := filter.And; and != nil { - if tags := and.Tags; tags != nil { - sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) - } - } - } + if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + continue } - if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { - return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + + if err != nil { + return err } - return nil + if replication, ok := output.(*s3.GetBucketReplicationOutput); ok && replication != nil && replication.ReplicationConfiguration != nil { + return fmt.Errorf("S3 Replication Configuration for bucket (%s) still exists", rs.Primary.ID) + } } -} -func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).S3Conn + return nil +} - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_bucket_replication_configuration" { - continue +func testAccCheckBucketReplicationConfigurationExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) } - input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.GetBucketReplication(input) - if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "NotFound", "") { - return nil - } + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } - if err != nil { - return resource.NonRetryableError(err) - } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn - return resource.RetryableError(fmt.Errorf("AWS S3 Bucket Replication Configuration still exists: %s", rs.Primary.ID)) + output, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), }) - if tfresource.TimedOut(err) { - _, err = conn.GetBucketReplication(input) - } - if err != nil { return err } + + if output == nil || output.ReplicationConfiguration == nil { + return fmt.Errorf("S3 Bucket Replication Configuration for bucket (%s) not found", rs.Primary.ID) + } + + return nil } - return nil } -func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { +func testAccBucketReplicationConfigurationBase(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} -resource "aws_iam_role" "role" { - name = "tf-iam-role-replication-%[1]d" +resource "aws_iam_role" "test" { + name = %[1]q assume_role_policy = < 0 && v[0] != nil { + result.AccessControlTranslation = ExpandAccessControlTranslation(v) + } + + if v, ok := tfMap["account"].(string); ok && v != "" { + result.Account = aws.String(v) + } + + if v, ok := tfMap["bucket"].(string); ok && v != "" { + result.Bucket = aws.String(v) + } + + if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EncryptionConfiguration = ExpandEncryptionConfiguration(v) + } + + if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Metrics = ExpandMetrics(v) + } + + if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicationTime = ExpandReplicationTime(v) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + result.StorageClass = aws.String(v) + } + + 
return result +} + +func ExpandExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplication { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ExistingObjectReplication{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandFilter(l []interface{}) *s3.ReplicationRuleFilter { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationRuleFilter{} + + if v, ok := tfMap["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.And = ExpandReplicationRuleAndOperator(v) + } + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + result.Prefix = aws.String(v) + } + + if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tags := Tags(tftags.New(v[0]).IgnoreAWS()) + if len(tags) > 0 { + result.Tag = tags[0] + } + } + + return result +} + +func ExpandMetrics(l []interface{}) *s3.Metrics { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.Metrics{} + + if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EventThreshold = ExpandReplicationTimeValue(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandReplicationRuleAndOperator(l []interface{}) *s3.ReplicationRuleAndOperator { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationRuleAndOperator{} + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + result.Prefix = aws.String(v) + } + + if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { + tags := Tags(tftags.New(v).IgnoreAWS()) + if len(tags) > 0 { + result.Tags = tags + } + } + + return result +} + +func ExpandReplicationTime(l []interface{}) *s3.ReplicationTime { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationTime{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + if v, ok := tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Time = ExpandReplicationTimeValue(v) + } + + return result +} + +func ExpandReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationTimeValue{} + + if v, ok := tfMap["minutes"].(int); ok { + result.Minutes = aws.Int64(int64(v)) + } + + return result +} + +func ExpandReplicaModifications(l []interface{}) *s3.ReplicaModifications { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicaModifications{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandRules(l []interface{}) []*s3.ReplicationRule { + var rules []*s3.ReplicationRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + rule := &s3.ReplicationRule{} + + if v, ok := 
tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.DeleteMarkerReplication = ExpandDeleteMarkerReplication(v) + } + + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Destination = ExpandDestination(v) + } + + if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.ExistingObjectReplication = ExpandExistingObjectReplication(v) + } + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Filter = ExpandFilter(v) + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + rule.ID = aws.String(v) + } + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + rule.Prefix = aws.String(v) + } + + if v, ok := tfMap["priority"].(int); ok && rule.Filter != nil { + rule.Priority = aws.Int64(int64(v)) + } + + if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.SourceSelectionCriteria = ExpandSourceSelectionCriteria(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + rule.Status = aws.String(v) + } + + rules = append(rules, rule) + } + + return rules +} + +func ExpandSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.SourceSelectionCriteria{} + + if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicaModifications = ExpandReplicaModifications(v) + } + + if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.SseKmsEncryptedObjects = ExpandSseKmsEncryptedObjects(v) + } + + return result +} + +func ExpandSseKmsEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.SseKmsEncryptedObjects{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandTag(l []interface{}) *s3.Tag { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.Tag{} + + if v, ok := tfMap["key"].(string); ok && v != "" { + result.Key = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + result.Value = aws.String(v) + } + + return result +} + +func FlattenAccessControlTranslation(act *s3.AccessControlTranslation) []interface{} { + if act == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if act.Owner != nil { + m["owner"] = aws.StringValue(act.Owner) + } + + return []interface{}{m} +} + +func FlattenEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{} { + if ec == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ec.ReplicaKmsKeyID != nil { + m["replica_kms_key_id"] = aws.StringValue(ec.ReplicaKmsKeyID) + } + + return []interface{}{m} +} + +func FlattenDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface{} { + if dmr == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if dmr.Status != nil { + m["status"] = aws.StringValue(dmr.Status) + } + + return []interface{}{m} +} + +func FlattenDestination(dest *s3.Destination) []interface{} { + if dest == nil { + return []interface{}{} + } + + m 
:= make(map[string]interface{}) + + if dest.AccessControlTranslation != nil { + m["access_control_translation"] = FlattenAccessControlTranslation(dest.AccessControlTranslation) + } + + if dest.Account != nil { + m["account"] = aws.StringValue(dest.Account) + } + + if dest.Bucket != nil { + m["bucket"] = aws.StringValue(dest.Bucket) + } + + if dest.EncryptionConfiguration != nil { + m["encryption_configuration"] = FlattenEncryptionConfiguration(dest.EncryptionConfiguration) + } + + if dest.Metrics != nil { + m["metrics"] = FlattenMetrics(dest.Metrics) + } + + if dest.ReplicationTime != nil { + m["replication_time"] = FlattenReplicationTime(dest.ReplicationTime) + } + + if dest.StorageClass != nil { + m["storage_class"] = aws.StringValue(dest.StorageClass) + } + + return []interface{}{m} +} + +func FlattenExistingObjectReplication(eor *s3.ExistingObjectReplication) []interface{} { + if eor == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if eor.Status != nil { + m["status"] = aws.StringValue(eor.Status) + } + + return []interface{}{m} +} + +func FlattenFilter(filter *s3.ReplicationRuleFilter) []interface{} { + if filter == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if filter.And != nil { + m["and"] = FlattenReplicationRuleAndOperator(filter.And) + } + + if filter.Prefix != nil { + m["prefix"] = aws.StringValue(filter.Prefix) + } + + if filter.Tag != nil { + tag := KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() + m["tag"] = []interface{}{tag} + } + + return []interface{}{m} +} + +func FlattenMetrics(metrics *s3.Metrics) []interface{} { + if metrics == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if metrics.EventThreshold != nil { + m["event_threshold"] = FlattenReplicationTimeValue(metrics.EventThreshold) + } + + if metrics.Status != nil { + m["status"] = aws.StringValue(metrics.Status) + } + + return []interface{}{m} +} + +func FlattenReplicationTime(rt *s3.ReplicationTime) []interface{} { + if rt == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rt.Status != nil { + m["status"] = aws.StringValue(rt.Status) + } + + if rt.Time != nil { + m["time"] = FlattenReplicationTimeValue(rt.Time) + } + + return []interface{}{m} + +} + +func FlattenReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} { + if rtv == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rtv.Minutes != nil { + m["minutes"] = int(aws.Int64Value(rtv.Minutes)) + } + + return []interface{}{m} +} + +func FlattenRules(rules []*s3.ReplicationRule) []interface{} { + if len(rules) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, rule := range rules { + if rule == nil { + continue + } + + m := make(map[string]interface{}) + + if rule.DeleteMarkerReplication != nil { + m["delete_marker_replication"] = FlattenDeleteMarkerReplication(rule.DeleteMarkerReplication) + } + + if rule.Destination != nil { + m["destination"] = FlattenDestination(rule.Destination) + } + + if rule.ExistingObjectReplication != nil { + m["existing_object_replication"] = FlattenExistingObjectReplication(rule.ExistingObjectReplication) + } + + if rule.Filter != nil { + m["filter"] = FlattenFilter(rule.Filter) + } + + if rule.ID != nil { + m["id"] = aws.StringValue(rule.ID) + } + + if rule.Prefix != nil { + m["prefix"] = aws.StringValue(rule.Prefix) + } + + if rule.Priority != nil { + m["priority"] = int(aws.Int64Value(rule.Priority)) + } + + if 
rule.SourceSelectionCriteria != nil { + m["source_selection_criteria"] = FlattenSourceSelectionCriteria(rule.SourceSelectionCriteria) + } + + if rule.Status != nil { + m["status"] = aws.StringValue(rule.Status) + } + + results = append(results, m) + } + + return results +} + +func FlattenReplicaModifications(rc *s3.ReplicaModifications) []interface{} { + if rc == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rc.Status != nil { + m["status"] = aws.StringValue(rc.Status) + } + + return []interface{}{m} +} + +func FlattenReplicationRuleAndOperator(op *s3.ReplicationRuleAndOperator) []interface{} { + if op == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if op.Prefix != nil { + m["prefix"] = aws.StringValue(op.Prefix) + } + + if op.Tags != nil { + m["tags"] = KeyValueTags(op.Tags).IgnoreAWS().Map() + } + + return []interface{}{m} + +} + +func FlattenSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} { + if ssc == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ssc.ReplicaModifications != nil { + m["replica_modifications"] = FlattenReplicaModifications(ssc.ReplicaModifications) + } + + if ssc.SseKmsEncryptedObjects != nil { + m["sse_kms_encrypted_objects"] = FlattenSseKmsEncryptedObjects(ssc.SseKmsEncryptedObjects) + } + + return []interface{}{m} +} + +func FlattenSseKmsEncryptedObjects(objects *s3.SseKmsEncryptedObjects) []interface{} { + if objects == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if objects.Status != nil { + m["status"] = aws.StringValue(objects.Status) + } + + return []interface{}{m} +} From 875cd9df213ccb2b7522825f62bfa9656b5af026 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 21:21:36 -0500 Subject: [PATCH 79/80] CR updates: docs --- website/docs/r/s3_bucket.html.markdown | 4 +- ...et_replication_configuration.html.markdown | 186 ++++++++++++------ 2 files changed, 125 insertions(+), 65 deletions(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 57bb495ebd7..0d07f54b221 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -178,7 +178,7 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using replication configuration -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. +~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to support bi-directional replication configuration and additional features. ```terraform provider "aws" { @@ -438,7 +438,7 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: -~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. +~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource documentation](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to avoid conflicts. Replication configuration can only be defined in one resource not both. 
When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. ``` lifecycle { diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 8de6f01c14e..d1820a83d47 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -``` +```terraform provider "aws" { region = "eu-west-1" } @@ -107,6 +107,7 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { ignore_changes = [ replication_configuration @@ -117,7 +118,8 @@ resource "aws_s3_bucket" "source" { resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn bucket = aws_s3_bucket.source.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -132,7 +134,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication -``` +```terraform # ... other configuration ... resource "aws_s3_bucket" "east" { @@ -167,7 +169,8 @@ resource "aws_s3_bucket" "west" { resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn bucket = aws_s3_bucket.east.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -182,7 +185,8 @@ resource "aws_s3_bucket_replication_configuration" "east_to_west" { resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn bucket = aws_s3_bucket.west.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -199,7 +203,7 @@ resource "aws_s3_bucket_replication_configuration" "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html). To avoid conflicts or unexpected apply results, a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. 
``` lifecycle { @@ -209,87 +213,114 @@ lifecycle { } ``` -The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html): * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) * `metrics` - Added to the `destination` configuration object [documented below](#metrics) * `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) * `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) -Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) - +Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication). ## Argument Reference -The `replication_configuration` resource supports the following: +The following arguments are supported: * `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). +* `rule` - (Required) Set of configuration blocks describing the rules managing the replication [documented below](#rule). -### rules +### rule -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rule` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. -The `rules` object supports the following: - -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +The `rule` configuration block supports the following arguments: -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). * `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication). * `destination` - (Required) Specifies the destination for the rule [documented below](#destination). 
+* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### existing_object_replication +### delete_marker_replication -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -The `existing_object_replication` object supports the following: +~> **NOTE:** This argument is only available with V2 replication configurations. ``` -existing_object_replication { +delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. +The `delete_marker_replication` configuration block supports the following arguments: +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. -### delete_marker_replication +### destination -~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. +The `destination` configuration block supports the following arguments: -~> **NOTE:** This argument is only available with V2 replication configurations. +* `access_control_translation` - (Optional) A configuration block that specifies the overrides to use for object owners on replication [documented below](#access_control_translation). Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account` owner override configuration. +* `account` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. 
+* `encryption_configuration` - (Optional) A configuration block that provides information about encryption [documented below](#encryption_configuration). If `source_selection_criteria` is specified, you must specify this element. +* `metrics` - (Optional) A configuration block that specifies replication metrics-related settings enabling replication metrics and events [documented below](#metrics). +* `replication_time` - (Optional) A configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated [documented below](#replication_time). Replication Time Control must be used in conjunction with `metrics`. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. -The `delete_marker_replication` object supports the following: +### access_control_translation ``` -delete_marker_replication { +access_control_translation { + owner = "Destination" +} +``` + +The `access_control_translation` configuration block supports the following arguments: + +* `owner` - (Required) Specifies the replica ownership. For default and valid values, see [PUT bucket replication](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) in the Amazon S3 API Reference. Valid values: `Destination`. + +### encryption_configuration + +``` +encryption_configuration { + replica_kms_key_id = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" +} +``` + +The `encryption_configuration` configuration block supports the following arguments: + +* `replica_kms_key_id` - (Required) The ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. + +### metrics + +``` +metrics { + event_threshold { + minutes = 15 + } status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. +The `metrics` configuration block supports the following arguments: +* `event_threshold` - (Required) A configuration block that specifies the time threshold for emitting the `s3:Replication:OperationMissedThreshold` event [documented below](#event_threshold). +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. -### destination -The `destination` object supports the following: +### event_threshold -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. -* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with - `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. 
Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). -* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). +The `event_threshold` configuration block supports the following arguments: + +* `minutes` - (Required) Time in minutes. Valid values: `15`. ### replication_time @@ -302,34 +333,60 @@ replication_time { } ``` -The `replication_time` object supports the following: +The `replication_time` configuration block supports the following arguments: * `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. -* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. +* `time` - (Required) A configuration block specifying the time by which replication should be complete for all objects and operations on objects [documented below](#time). -### metrics +### time + +The `time` configuration block supports the following arguments: + +* `minutes` - (Required) Time in minutes. Valid values: `15`. + +### existing_object_replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) ``` -metrics { +existing_object_replication { status = "Enabled" - event_threshold { - minutes = 15 - } } ``` -The `metrics` object supports the following: +The `existing_object_replication` configuration block supports the following arguments: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. -* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. -### source_selection_criteria +### filter + +~> **NOTE:** With the `filter` argument, you must specify exactly one of `prefix`, `tag`, or `and`. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +The `filter` configuration block supports the following arguments: + +* `and` - (Optional) A configuration block for specifying rule filters. This element is required only if you specify more than one filter. See [and](#and) below for more details. +* `prefix` - (Optional) An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tag` - (Optional) A configuration block for specifying a tag key and value [documented below](#tag). 
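+
+For example, a rule that should apply only to objects under a given key prefix that also carry a particular tag (the prefix, tag key, and value below are illustrative) must combine both predicates in an `and` block:
+
+```
+filter {
+  and {
+    prefix = "logs/"
+    tags = {
+      Environment = "Production"
+    }
+  }
+}
+```
+
+A filter that needs only a single predicate can use the `prefix` or `tag` argument directly instead of `and`.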
+ +### and + +The `and` configuration block supports the following arguments: + +* `prefix` - (Optional) An object key name prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional, Required if `prefix` is configured) A map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -The `source_selection_criteria` object supports the following: +### tag + +The `tag` configuration block supports the following arguments: + +* `key` - (Required) Name of the object key. +* `value` - (Required) Value of the tag. + +### source_selection_criteria ``` source_selection_criteria { - replica_modification { + replica_modifications { status = "Enabled" } sse_kms_encrypted_objects { @@ -338,28 +395,31 @@ source_selection_criteria { } ``` - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. +The `source_selection_criteria` configuration block supports the following arguments: -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. +~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html). -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. +* `replica_modifications` - (Optional) A configuration block that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when `filter` is specified), you can specify this element and set the status to `Enabled` to replicate modifications on replicas. +* `sse_kms_encrypted_objects` - (Optional) A configuration block for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If specified, `replica_kms_key_id` in `destination` `encryption_configuration` must be specified as well. -### filter +### replica_modifications -The `filter` object supports the following: +The `replica_modifications` configuration block supports the following arguments: -* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. -* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. -The rule applies only to objects having all the tags in its tagset. +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. + +### sse_kms_encrypted_objects + +The `sse_kms_encrypted_objects` configuration block supports the following arguments: + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* id - Resource id is the s3 source bucket name. +* `id` - The S3 source bucket name. 
## Import From 557ff34cb75be68a135bb3cf7f3ccf0bab9b83fc Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 22:59:08 -0500 Subject: [PATCH 80/80] retain Set hashing --- internal/service/s3/bucket_replication_configuration.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index ec27a3fdc4d..4b577b43e79 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -41,7 +41,7 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "rule": { Type: schema.TypeSet, Required: true, - //Set: rulesHash, + Set: rulesHash, MaxItems: 1000, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{